xref: /openbmc/linux/drivers/iommu/iommu.c (revision 910499e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  */
6 
7 #define pr_fmt(fmt)    "iommu: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
27 
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
30 
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
34 
35 struct iommu_group {
36 	struct kobject kobj;
37 	struct kobject *devices_kobj;
38 	struct list_head devices;
39 	struct mutex mutex;
40 	struct blocking_notifier_head notifier;
41 	void *iommu_data;
42 	void (*iommu_data_release)(void *iommu_data);
43 	char *name;
44 	int id;
45 	struct iommu_domain *default_domain;
46 	struct iommu_domain *domain;
47 	struct list_head entry;
48 };
49 
50 struct group_device {
51 	struct list_head list;
52 	struct device *dev;
53 	char *name;
54 };
55 
56 struct iommu_group_attribute {
57 	struct attribute attr;
58 	ssize_t (*show)(struct iommu_group *group, char *buf);
59 	ssize_t (*store)(struct iommu_group *group,
60 			 const char *buf, size_t count);
61 };
62 
63 static const char * const iommu_group_resv_type_string[] = {
64 	[IOMMU_RESV_DIRECT]			= "direct",
65 	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
66 	[IOMMU_RESV_RESERVED]			= "reserved",
67 	[IOMMU_RESV_MSI]			= "msi",
68 	[IOMMU_RESV_SW_MSI]			= "msi",
69 };
70 
71 #define IOMMU_CMD_LINE_DMA_API		BIT(0)
72 
73 static void iommu_set_cmd_line_dma_api(void)
74 {
75 	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
76 }
77 
78 static bool iommu_cmd_line_dma_api(void)
79 {
80 	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
81 }
82 
83 static int iommu_alloc_default_domain(struct iommu_group *group,
84 				      struct device *dev);
85 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
86 						 unsigned type);
87 static int __iommu_attach_device(struct iommu_domain *domain,
88 				 struct device *dev);
89 static int __iommu_attach_group(struct iommu_domain *domain,
90 				struct iommu_group *group);
91 static void __iommu_detach_group(struct iommu_domain *domain,
92 				 struct iommu_group *group);
93 static int iommu_create_device_direct_mappings(struct iommu_group *group,
94 					       struct device *dev);
95 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
96 static ssize_t iommu_group_store_type(struct iommu_group *group,
97 				      const char *buf, size_t count);
98 
99 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
100 struct iommu_group_attribute iommu_group_attr_##_name =		\
101 	__ATTR(_name, _mode, _show, _store)
102 
103 #define to_iommu_group_attr(_attr)	\
104 	container_of(_attr, struct iommu_group_attribute, attr)
105 #define to_iommu_group(_kobj)		\
106 	container_of(_kobj, struct iommu_group, kobj)
107 
108 static LIST_HEAD(iommu_device_list);
109 static DEFINE_SPINLOCK(iommu_device_lock);
110 
111 /*
112  * Use a function instead of an array here because the domain-type is a
113  * bit-field, so an array would waste memory.
114  */
115 static const char *iommu_domain_type_str(unsigned int t)
116 {
117 	switch (t) {
118 	case IOMMU_DOMAIN_BLOCKED:
119 		return "Blocked";
120 	case IOMMU_DOMAIN_IDENTITY:
121 		return "Passthrough";
122 	case IOMMU_DOMAIN_UNMANAGED:
123 		return "Unmanaged";
124 	case IOMMU_DOMAIN_DMA:
125 		return "Translated";
126 	default:
127 		return "Unknown";
128 	}
129 }
130 
131 static int __init iommu_subsys_init(void)
132 {
133 	bool cmd_line = iommu_cmd_line_dma_api();
134 
135 	if (!cmd_line) {
136 		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
137 			iommu_set_default_passthrough(false);
138 		else
139 			iommu_set_default_translated(false);
140 
141 		if (iommu_default_passthrough() && mem_encrypt_active()) {
142 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
143 			iommu_set_default_translated(false);
144 		}
145 	}
146 
147 	pr_info("Default domain type: %s %s\n",
148 		iommu_domain_type_str(iommu_def_domain_type),
149 		cmd_line ? "(set via kernel command line)" : "");
150 
151 	return 0;
152 }
153 subsys_initcall(iommu_subsys_init);
154 
155 int iommu_device_register(struct iommu_device *iommu)
156 {
157 	spin_lock(&iommu_device_lock);
158 	list_add_tail(&iommu->list, &iommu_device_list);
159 	spin_unlock(&iommu_device_lock);
160 	return 0;
161 }
162 EXPORT_SYMBOL_GPL(iommu_device_register);
163 
164 void iommu_device_unregister(struct iommu_device *iommu)
165 {
166 	spin_lock(&iommu_device_lock);
167 	list_del(&iommu->list);
168 	spin_unlock(&iommu_device_lock);
169 }
170 EXPORT_SYMBOL_GPL(iommu_device_unregister);
171 
172 static struct dev_iommu *dev_iommu_get(struct device *dev)
173 {
174 	struct dev_iommu *param = dev->iommu;
175 
176 	if (param)
177 		return param;
178 
179 	param = kzalloc(sizeof(*param), GFP_KERNEL);
180 	if (!param)
181 		return NULL;
182 
183 	mutex_init(&param->lock);
184 	dev->iommu = param;
185 	return param;
186 }
187 
188 static void dev_iommu_free(struct device *dev)
189 {
190 	iommu_fwspec_free(dev);
191 	kfree(dev->iommu);
192 	dev->iommu = NULL;
193 }
194 
195 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
196 {
197 	const struct iommu_ops *ops = dev->bus->iommu_ops;
198 	struct iommu_device *iommu_dev;
199 	struct iommu_group *group;
200 	int ret;
201 
202 	if (!ops)
203 		return -ENODEV;
204 
205 	if (!dev_iommu_get(dev))
206 		return -ENOMEM;
207 
208 	if (!try_module_get(ops->owner)) {
209 		ret = -EINVAL;
210 		goto err_free;
211 	}
212 
213 	iommu_dev = ops->probe_device(dev);
214 	if (IS_ERR(iommu_dev)) {
215 		ret = PTR_ERR(iommu_dev);
216 		goto out_module_put;
217 	}
218 
219 	dev->iommu->iommu_dev = iommu_dev;
220 
221 	group = iommu_group_get_for_dev(dev);
222 	if (IS_ERR(group)) {
223 		ret = PTR_ERR(group);
224 		goto out_release;
225 	}
226 	iommu_group_put(group);
227 
228 	if (group_list && !group->default_domain && list_empty(&group->entry))
229 		list_add_tail(&group->entry, group_list);
230 
231 	iommu_device_link(iommu_dev, dev);
232 
233 	return 0;
234 
235 out_release:
236 	ops->release_device(dev);
237 
238 out_module_put:
239 	module_put(ops->owner);
240 
241 err_free:
242 	dev_iommu_free(dev);
243 
244 	return ret;
245 }
246 
247 int iommu_probe_device(struct device *dev)
248 {
249 	const struct iommu_ops *ops = dev->bus->iommu_ops;
250 	struct iommu_group *group;
251 	int ret;
252 
253 	ret = __iommu_probe_device(dev, NULL);
254 	if (ret)
255 		goto err_out;
256 
257 	group = iommu_group_get(dev);
258 	if (!group) {
259 		ret = -ENODEV;
260 		goto err_release;
261 	}
262 
263 	/*
264 	 * Try to allocate a default domain - needs support from the
265 	 * IOMMU driver. There are still some drivers which don't
266 	 * support default domains, so the return value is not yet
267 	 * checked.
268 	 */
269 	iommu_alloc_default_domain(group, dev);
270 
271 	if (group->default_domain) {
272 		ret = __iommu_attach_device(group->default_domain, dev);
273 		if (ret) {
274 			iommu_group_put(group);
275 			goto err_release;
276 		}
277 	}
278 
279 	iommu_create_device_direct_mappings(group, dev);
280 
281 	iommu_group_put(group);
282 
283 	if (ops->probe_finalize)
284 		ops->probe_finalize(dev);
285 
286 	return 0;
287 
288 err_release:
289 	iommu_release_device(dev);
290 
291 err_out:
292 	return ret;
293 
294 }
295 
296 void iommu_release_device(struct device *dev)
297 {
298 	const struct iommu_ops *ops = dev->bus->iommu_ops;
299 
300 	if (!dev->iommu)
301 		return;
302 
303 	iommu_device_unlink(dev->iommu->iommu_dev, dev);
304 
305 	ops->release_device(dev);
306 
307 	iommu_group_remove_device(dev);
308 	module_put(ops->owner);
309 	dev_iommu_free(dev);
310 }
311 
312 static int __init iommu_set_def_domain_type(char *str)
313 {
314 	bool pt;
315 	int ret;
316 
317 	ret = kstrtobool(str, &pt);
318 	if (ret)
319 		return ret;
320 
321 	if (pt)
322 		iommu_set_default_passthrough(true);
323 	else
324 		iommu_set_default_translated(true);
325 
326 	return 0;
327 }
328 early_param("iommu.passthrough", iommu_set_def_domain_type);
329 
330 static int __init iommu_dma_setup(char *str)
331 {
332 	return kstrtobool(str, &iommu_dma_strict);
333 }
334 early_param("iommu.strict", iommu_dma_setup);
335 
336 static ssize_t iommu_group_attr_show(struct kobject *kobj,
337 				     struct attribute *__attr, char *buf)
338 {
339 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
340 	struct iommu_group *group = to_iommu_group(kobj);
341 	ssize_t ret = -EIO;
342 
343 	if (attr->show)
344 		ret = attr->show(group, buf);
345 	return ret;
346 }
347 
348 static ssize_t iommu_group_attr_store(struct kobject *kobj,
349 				      struct attribute *__attr,
350 				      const char *buf, size_t count)
351 {
352 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
353 	struct iommu_group *group = to_iommu_group(kobj);
354 	ssize_t ret = -EIO;
355 
356 	if (attr->store)
357 		ret = attr->store(group, buf, count);
358 	return ret;
359 }
360 
361 static const struct sysfs_ops iommu_group_sysfs_ops = {
362 	.show = iommu_group_attr_show,
363 	.store = iommu_group_attr_store,
364 };
365 
366 static int iommu_group_create_file(struct iommu_group *group,
367 				   struct iommu_group_attribute *attr)
368 {
369 	return sysfs_create_file(&group->kobj, &attr->attr);
370 }
371 
372 static void iommu_group_remove_file(struct iommu_group *group,
373 				    struct iommu_group_attribute *attr)
374 {
375 	sysfs_remove_file(&group->kobj, &attr->attr);
376 }
377 
378 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
379 {
380 	return sprintf(buf, "%s\n", group->name);
381 }
382 
383 /**
384  * iommu_insert_resv_region - Insert a new region in the
385  * list of reserved regions.
386  * @new: new region to insert
387  * @regions: list of regions
388  *
389  * Elements are sorted by start address and overlapping segments
390  * of the same type are merged.
391  */
392 static int iommu_insert_resv_region(struct iommu_resv_region *new,
393 				    struct list_head *regions)
394 {
395 	struct iommu_resv_region *iter, *tmp, *nr, *top;
396 	LIST_HEAD(stack);
397 
398 	nr = iommu_alloc_resv_region(new->start, new->length,
399 				     new->prot, new->type);
400 	if (!nr)
401 		return -ENOMEM;
402 
403 	/* First add the new element based on start address sorting */
404 	list_for_each_entry(iter, regions, list) {
405 		if (nr->start < iter->start ||
406 		    (nr->start == iter->start && nr->type <= iter->type))
407 			break;
408 	}
409 	list_add_tail(&nr->list, &iter->list);
410 
411 	/* Merge overlapping segments of type nr->type in @regions, if any */
412 	list_for_each_entry_safe(iter, tmp, regions, list) {
413 		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
414 
415 		/* no merge needed for elements of a different type than @new */
416 		if (iter->type != new->type) {
417 			list_move_tail(&iter->list, &stack);
418 			continue;
419 		}
420 
421 		/* look for the last stack element of same type as @iter */
422 		list_for_each_entry_reverse(top, &stack, list)
423 			if (top->type == iter->type)
424 				goto check_overlap;
425 
426 		list_move_tail(&iter->list, &stack);
427 		continue;
428 
429 check_overlap:
430 		top_end = top->start + top->length - 1;
431 
432 		if (iter->start > top_end + 1) {
433 			list_move_tail(&iter->list, &stack);
434 		} else {
435 			top->length = max(top_end, iter_end) - top->start + 1;
436 			list_del(&iter->list);
437 			kfree(iter);
438 		}
439 	}
440 	list_splice(&stack, regions);
441 	return 0;
442 }
443 
444 static int
445 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
446 				 struct list_head *group_resv_regions)
447 {
448 	struct iommu_resv_region *entry;
449 	int ret = 0;
450 
451 	list_for_each_entry(entry, dev_resv_regions, list) {
452 		ret = iommu_insert_resv_region(entry, group_resv_regions);
453 		if (ret)
454 			break;
455 	}
456 	return ret;
457 }
458 
459 int iommu_get_group_resv_regions(struct iommu_group *group,
460 				 struct list_head *head)
461 {
462 	struct group_device *device;
463 	int ret = 0;
464 
465 	mutex_lock(&group->mutex);
466 	list_for_each_entry(device, &group->devices, list) {
467 		struct list_head dev_resv_regions;
468 
469 		INIT_LIST_HEAD(&dev_resv_regions);
470 		iommu_get_resv_regions(device->dev, &dev_resv_regions);
471 		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
472 		iommu_put_resv_regions(device->dev, &dev_resv_regions);
473 		if (ret)
474 			break;
475 	}
476 	mutex_unlock(&group->mutex);
477 	return ret;
478 }
479 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
480 
481 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
482 					     char *buf)
483 {
484 	struct iommu_resv_region *region, *next;
485 	struct list_head group_resv_regions;
486 	char *str = buf;
487 
488 	INIT_LIST_HEAD(&group_resv_regions);
489 	iommu_get_group_resv_regions(group, &group_resv_regions);
490 
491 	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
492 		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
493 			       (long long int)region->start,
494 			       (long long int)(region->start +
495 						region->length - 1),
496 			       iommu_group_resv_type_string[region->type]);
497 		kfree(region);
498 	}
499 
500 	return (str - buf);
501 }
502 
503 static ssize_t iommu_group_show_type(struct iommu_group *group,
504 				     char *buf)
505 {
506 	char *type = "unknown\n";
507 
508 	mutex_lock(&group->mutex);
509 	if (group->default_domain) {
510 		switch (group->default_domain->type) {
511 		case IOMMU_DOMAIN_BLOCKED:
512 			type = "blocked\n";
513 			break;
514 		case IOMMU_DOMAIN_IDENTITY:
515 			type = "identity\n";
516 			break;
517 		case IOMMU_DOMAIN_UNMANAGED:
518 			type = "unmanaged\n";
519 			break;
520 		case IOMMU_DOMAIN_DMA:
521 			type = "DMA\n";
522 			break;
523 		}
524 	}
525 	mutex_unlock(&group->mutex);
526 	strcpy(buf, type);
527 
528 	return strlen(type);
529 }
530 
531 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
532 
533 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
534 			iommu_group_show_resv_regions, NULL);
535 
536 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
537 			iommu_group_store_type);
538 
539 static void iommu_group_release(struct kobject *kobj)
540 {
541 	struct iommu_group *group = to_iommu_group(kobj);
542 
543 	pr_debug("Releasing group %d\n", group->id);
544 
545 	if (group->iommu_data_release)
546 		group->iommu_data_release(group->iommu_data);
547 
548 	ida_simple_remove(&iommu_group_ida, group->id);
549 
550 	if (group->default_domain)
551 		iommu_domain_free(group->default_domain);
552 
553 	kfree(group->name);
554 	kfree(group);
555 }
556 
557 static struct kobj_type iommu_group_ktype = {
558 	.sysfs_ops = &iommu_group_sysfs_ops,
559 	.release = iommu_group_release,
560 };
561 
562 /**
563  * iommu_group_alloc - Allocate a new group
564  *
565  * This function is called by an iommu driver to allocate a new iommu
566  * group.  The iommu group represents the minimum granularity of the iommu.
567  * Upon successful return, the caller holds a reference to the supplied
568  * group in order to hold the group until devices are added.  Use
569  * iommu_group_put() to release this extra reference count, allowing the
570  * group to be automatically reclaimed once it has no devices or external
571  * references.
572  */
573 struct iommu_group *iommu_group_alloc(void)
574 {
575 	struct iommu_group *group;
576 	int ret;
577 
578 	group = kzalloc(sizeof(*group), GFP_KERNEL);
579 	if (!group)
580 		return ERR_PTR(-ENOMEM);
581 
582 	group->kobj.kset = iommu_group_kset;
583 	mutex_init(&group->mutex);
584 	INIT_LIST_HEAD(&group->devices);
585 	INIT_LIST_HEAD(&group->entry);
586 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
587 
588 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
589 	if (ret < 0) {
590 		kfree(group);
591 		return ERR_PTR(ret);
592 	}
593 	group->id = ret;
594 
595 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
596 				   NULL, "%d", group->id);
597 	if (ret) {
598 		ida_simple_remove(&iommu_group_ida, group->id);
599 		kobject_put(&group->kobj);
600 		return ERR_PTR(ret);
601 	}
602 
603 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
604 	if (!group->devices_kobj) {
605 		kobject_put(&group->kobj); /* triggers .release & free */
606 		return ERR_PTR(-ENOMEM);
607 	}
608 
609 	/*
610 	 * The devices_kobj holds a reference on the group kobject, so
611 	 * as long as that exists so will the group.  We can therefore
612 	 * use the devices_kobj for reference counting.
613 	 */
614 	kobject_put(&group->kobj);
615 
616 	ret = iommu_group_create_file(group,
617 				      &iommu_group_attr_reserved_regions);
618 	if (ret)
619 		return ERR_PTR(ret);
620 
621 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
622 	if (ret)
623 		return ERR_PTR(ret);
624 
625 	pr_debug("Allocated group %d\n", group->id);
626 
627 	return group;
628 }
629 EXPORT_SYMBOL_GPL(iommu_group_alloc);
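
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * typical ->device_group() callback simply returns a fresh group; the
 * core drops the allocation reference once the device has been added
 * (see __iommu_probe_device() above).
 */
static struct iommu_group *mydrv_alloc_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return group;

	/* Optionally label the group before handing it back. */
	ret = iommu_group_set_name(group, "mydrv");
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}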
630 
631 struct iommu_group *iommu_group_get_by_id(int id)
632 {
633 	struct kobject *group_kobj;
634 	struct iommu_group *group;
635 	const char *name;
636 
637 	if (!iommu_group_kset)
638 		return NULL;
639 
640 	name = kasprintf(GFP_KERNEL, "%d", id);
641 	if (!name)
642 		return NULL;
643 
644 	group_kobj = kset_find_obj(iommu_group_kset, name);
645 	kfree(name);
646 
647 	if (!group_kobj)
648 		return NULL;
649 
650 	group = container_of(group_kobj, struct iommu_group, kobj);
651 	BUG_ON(group->id != id);
652 
653 	kobject_get(group->devices_kobj);
654 	kobject_put(&group->kobj);
655 
656 	return group;
657 }
658 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
659 
660 /**
661  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
662  * @group: the group
663  *
664  * iommu drivers can store data in the group for use when doing iommu
665  * operations.  This function provides a way to retrieve it.  Caller
666  * should hold a group reference.
667  */
668 void *iommu_group_get_iommudata(struct iommu_group *group)
669 {
670 	return group->iommu_data;
671 }
672 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
673 
674 /**
675  * iommu_group_set_iommudata - set iommu_data for a group
676  * @group: the group
677  * @iommu_data: new data
678  * @release: release function for iommu_data
679  *
680  * iommu drivers can store data in the group for use when doing iommu
681  * operations.  This function provides a way to set the data after
682  * the group has been allocated.  Caller should hold a group reference.
683  */
684 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
685 			       void (*release)(void *iommu_data))
686 {
687 	group->iommu_data = iommu_data;
688 	group->iommu_data_release = release;
689 }
690 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
691 
692 /**
693  * iommu_group_set_name - set name for a group
694  * @group: the group
695  * @name: name
696  *
697  * Allow an iommu driver to set a name for a group.  When set, it will
698  * appear in a name attribute file under the group in sysfs.
699  */
700 int iommu_group_set_name(struct iommu_group *group, const char *name)
701 {
702 	int ret;
703 
704 	if (group->name) {
705 		iommu_group_remove_file(group, &iommu_group_attr_name);
706 		kfree(group->name);
707 		group->name = NULL;
708 		if (!name)
709 			return 0;
710 	}
711 
712 	group->name = kstrdup(name, GFP_KERNEL);
713 	if (!group->name)
714 		return -ENOMEM;
715 
716 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
717 	if (ret) {
718 		kfree(group->name);
719 		group->name = NULL;
720 		return ret;
721 	}
722 
723 	return 0;
724 }
725 EXPORT_SYMBOL_GPL(iommu_group_set_name);
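
/*
 * Sketch (hypothetical driver data, not part of this file): attaching
 * per-group private data that is freed automatically when the group's
 * last reference is dropped and iommu_group_release() runs.
 */
struct mydrv_group_info {		/* assumed driver-private structure */
	u32 stream_id;
};

static void mydrv_group_info_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int mydrv_attach_group_info(struct iommu_group *group, u32 sid)
{
	struct mydrv_group_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->stream_id = sid;
	iommu_group_set_iommudata(group, info, mydrv_group_info_release);

	return 0;
}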
726 
727 static int iommu_create_device_direct_mappings(struct iommu_group *group,
728 					       struct device *dev)
729 {
730 	struct iommu_domain *domain = group->default_domain;
731 	struct iommu_resv_region *entry;
732 	struct list_head mappings;
733 	unsigned long pg_size;
734 	int ret = 0;
735 
736 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
737 		return 0;
738 
739 	BUG_ON(!domain->pgsize_bitmap);
740 
741 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
742 	INIT_LIST_HEAD(&mappings);
743 
744 	iommu_get_resv_regions(dev, &mappings);
745 
746 	/* We need to consider overlapping regions for different devices */
747 	list_for_each_entry(entry, &mappings, list) {
748 		dma_addr_t start, end, addr;
749 		size_t map_size = 0;
750 
751 		if (domain->ops->apply_resv_region)
752 			domain->ops->apply_resv_region(dev, domain, entry);
753 
754 		start = ALIGN(entry->start, pg_size);
755 		end   = ALIGN(entry->start + entry->length, pg_size);
756 
757 		if (entry->type != IOMMU_RESV_DIRECT &&
758 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
759 			continue;
760 
761 		for (addr = start; addr <= end; addr += pg_size) {
762 			phys_addr_t phys_addr;
763 
764 			if (addr == end)
765 				goto map_end;
766 
767 			phys_addr = iommu_iova_to_phys(domain, addr);
768 			if (!phys_addr) {
769 				map_size += pg_size;
770 				continue;
771 			}
772 
773 map_end:
774 			if (map_size) {
775 				ret = iommu_map(domain, addr - map_size,
776 						addr - map_size, map_size,
777 						entry->prot);
778 				if (ret)
779 					goto out;
780 				map_size = 0;
781 			}
782 		}
783 
784 	}
785 
786 	iommu_flush_iotlb_all(domain);
787 
788 out:
789 	iommu_put_resv_regions(dev, &mappings);
790 
791 	return ret;
792 }
793 
794 static bool iommu_is_attach_deferred(struct iommu_domain *domain,
795 				     struct device *dev)
796 {
797 	if (domain->ops->is_attach_deferred)
798 		return domain->ops->is_attach_deferred(domain, dev);
799 
800 	return false;
801 }
802 
803 /**
804  * iommu_group_add_device - add a device to an iommu group
805  * @group: the group into which to add the device (reference should be held)
806  * @dev: the device
807  *
808  * This function is called by an iommu driver to add a device into a
809  * group.  Adding a device increments the group reference count.
810  */
811 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
812 {
813 	int ret, i = 0;
814 	struct group_device *device;
815 
816 	device = kzalloc(sizeof(*device), GFP_KERNEL);
817 	if (!device)
818 		return -ENOMEM;
819 
820 	device->dev = dev;
821 
822 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
823 	if (ret)
824 		goto err_free_device;
825 
826 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
827 rename:
828 	if (!device->name) {
829 		ret = -ENOMEM;
830 		goto err_remove_link;
831 	}
832 
833 	ret = sysfs_create_link_nowarn(group->devices_kobj,
834 				       &dev->kobj, device->name);
835 	if (ret) {
836 		if (ret == -EEXIST && i >= 0) {
837 			/*
838 			 * Account for the slim chance of collision
839 			 * and append an instance to the name.
840 			 */
841 			kfree(device->name);
842 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
843 						 kobject_name(&dev->kobj), i++);
844 			goto rename;
845 		}
846 		goto err_free_name;
847 	}
848 
849 	kobject_get(group->devices_kobj);
850 
851 	dev->iommu_group = group;
852 
853 	mutex_lock(&group->mutex);
854 	list_add_tail(&device->list, &group->devices);
855 	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
856 		ret = __iommu_attach_device(group->domain, dev);
857 	mutex_unlock(&group->mutex);
858 	if (ret)
859 		goto err_put_group;
860 
861 	/* Notify any listeners about change to group. */
862 	blocking_notifier_call_chain(&group->notifier,
863 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
864 
865 	trace_add_device_to_group(group->id, dev);
866 
867 	dev_info(dev, "Adding to iommu group %d\n", group->id);
868 
869 	return 0;
870 
871 err_put_group:
872 	mutex_lock(&group->mutex);
873 	list_del(&device->list);
874 	mutex_unlock(&group->mutex);
875 	dev->iommu_group = NULL;
876 	kobject_put(group->devices_kobj);
877 	sysfs_remove_link(group->devices_kobj, device->name);
878 err_free_name:
879 	kfree(device->name);
880 err_remove_link:
881 	sysfs_remove_link(&dev->kobj, "iommu_group");
882 err_free_device:
883 	kfree(device);
884 	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
885 	return ret;
886 }
887 EXPORT_SYMBOL_GPL(iommu_group_add_device);
888 
889 /**
890  * iommu_group_remove_device - remove a device from its current group
891  * @dev: device to be removed
892  *
893  * This function is called by an iommu driver to remove the device from
894  * its current group.  This decrements the iommu group reference count.
895  */
896 void iommu_group_remove_device(struct device *dev)
897 {
898 	struct iommu_group *group = dev->iommu_group;
899 	struct group_device *tmp_device, *device = NULL;
900 
901 	dev_info(dev, "Removing from iommu group %d\n", group->id);
902 
903 	/* Pre-notify listeners that a device is being removed. */
904 	blocking_notifier_call_chain(&group->notifier,
905 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
906 
907 	mutex_lock(&group->mutex);
908 	list_for_each_entry(tmp_device, &group->devices, list) {
909 		if (tmp_device->dev == dev) {
910 			device = tmp_device;
911 			list_del(&device->list);
912 			break;
913 		}
914 	}
915 	mutex_unlock(&group->mutex);
916 
917 	if (!device)
918 		return;
919 
920 	sysfs_remove_link(group->devices_kobj, device->name);
921 	sysfs_remove_link(&dev->kobj, "iommu_group");
922 
923 	trace_remove_device_from_group(group->id, dev);
924 
925 	kfree(device->name);
926 	kfree(device);
927 	dev->iommu_group = NULL;
928 	kobject_put(group->devices_kobj);
929 }
930 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
931 
932 static int iommu_group_device_count(struct iommu_group *group)
933 {
934 	struct group_device *entry;
935 	int ret = 0;
936 
937 	list_for_each_entry(entry, &group->devices, list)
938 		ret++;
939 
940 	return ret;
941 }
942 
943 /**
944  * iommu_group_for_each_dev - iterate over each device in the group
945  * @group: the group
946  * @data: caller opaque data to be passed to callback function
947  * @fn: caller supplied callback function
948  *
949  * This function is called by group users to iterate over group devices.
950  * Callers should hold a reference count to the group during callback.
951  * The group->mutex is held across callbacks, which will block calls to
952  * iommu_group_add/remove_device.
953  */
954 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
955 				      int (*fn)(struct device *, void *))
956 {
957 	struct group_device *device;
958 	int ret = 0;
959 
960 	list_for_each_entry(device, &group->devices, list) {
961 		ret = fn(device->dev, data);
962 		if (ret)
963 			break;
964 	}
965 	return ret;
966 }
967 
968 
969 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
970 			     int (*fn)(struct device *, void *))
971 {
972 	int ret;
973 
974 	mutex_lock(&group->mutex);
975 	ret = __iommu_group_for_each_dev(group, data, fn);
976 	mutex_unlock(&group->mutex);
977 
978 	return ret;
979 }
980 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
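
/*
 * Iteration sketch (hypothetical callback, not part of this file).  The
 * callback runs with group->mutex held, so it must not call back into
 * iommu_group_add_device()/iommu_group_remove_device().
 */
static int mydrv_count_dev(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* returning non-zero stops the iteration */
}

/*
 * Caller side:
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, mydrv_count_dev);
 */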
981 
982 /**
983  * iommu_group_get - Return the group for a device and increment reference
984  * @dev: get the group that this device belongs to
985  *
986  * This function is called by iommu drivers and users to get the group
987  * for the specified device.  If found, the group is returned and the group
988  * reference is incremented; otherwise NULL is returned.
989  */
990 struct iommu_group *iommu_group_get(struct device *dev)
991 {
992 	struct iommu_group *group = dev->iommu_group;
993 
994 	if (group)
995 		kobject_get(group->devices_kobj);
996 
997 	return group;
998 }
999 EXPORT_SYMBOL_GPL(iommu_group_get);
1000 
1001 /**
1002  * iommu_group_ref_get - Increment reference on a group
1003  * @group: the group to use, must not be NULL
1004  *
1005  * This function is called by iommu drivers to take additional references on an
1006  * existing group.  Returns the given group for convenience.
1007  */
1008 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1009 {
1010 	kobject_get(group->devices_kobj);
1011 	return group;
1012 }
1013 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1014 
1015 /**
1016  * iommu_group_put - Decrement group reference
1017  * @group: the group to use
1018  *
1019  * This function is called by iommu drivers and users to release the
1020  * iommu group.  Once the reference count is zero, the group is released.
1021  */
1022 void iommu_group_put(struct iommu_group *group)
1023 {
1024 	if (group)
1025 		kobject_put(group->devices_kobj);
1026 }
1027 EXPORT_SYMBOL_GPL(iommu_group_put);
1028 
1029 /**
1030  * iommu_group_register_notifier - Register a notifier for group changes
1031  * @group: the group to watch
1032  * @nb: notifier block to signal
1033  *
1034  * This function allows iommu group users to track changes in a group.
1035  * See include/linux/iommu.h for actions sent via this notifier.  Caller
1036  * should hold a reference to the group throughout notifier registration.
1037  */
1038 int iommu_group_register_notifier(struct iommu_group *group,
1039 				  struct notifier_block *nb)
1040 {
1041 	return blocking_notifier_chain_register(&group->notifier, nb);
1042 }
1043 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
1044 
1045 /**
1046  * iommu_group_unregister_notifier - Unregister a notifier
1047  * @group: the group to watch
1048  * @nb: notifier block to signal
1049  *
1050  * Unregister a previously registered group notifier block.
1051  */
1052 int iommu_group_unregister_notifier(struct iommu_group *group,
1053 				    struct notifier_block *nb)
1054 {
1055 	return blocking_notifier_chain_unregister(&group->notifier, nb);
1056 }
1057 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
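
/*
 * Notifier sketch (hypothetical group user, not part of this file).
 * The actions delivered here are the IOMMU_GROUP_NOTIFY_* events
 * republished by iommu_bus_notifier() further down in this file.
 */
static int mydrv_group_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added to iommu group\n");
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		dev_info(dev, "being removed from iommu group\n");
		break;
	}

	return NOTIFY_OK;
}

/*
 * Caller side:
 *	static struct notifier_block nb = { .notifier_call = mydrv_group_notify };
 *	iommu_group_register_notifier(group, &nb);
 */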
1058 
1059 /**
1060  * iommu_register_device_fault_handler() - Register a device fault handler
1061  * @dev: the device
1062  * @handler: the fault handler
1063  * @data: private data passed as argument to the handler
1064  *
1065  * When an IOMMU fault event is received, this handler gets called with the
1066  * fault event and data as argument. The handler should return 0 on success. If
1067  * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1068  * complete the fault by calling iommu_page_response() with one of the following
1069  * response codes:
1070  * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1071  * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1072  * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1073  *   page faults if possible.
1074  *
1075  * Return 0 if the fault handler was installed successfully, or an error.
1076  */
1077 int iommu_register_device_fault_handler(struct device *dev,
1078 					iommu_dev_fault_handler_t handler,
1079 					void *data)
1080 {
1081 	struct dev_iommu *param = dev->iommu;
1082 	int ret = 0;
1083 
1084 	if (!param)
1085 		return -EINVAL;
1086 
1087 	mutex_lock(&param->lock);
1088 	/* Only allow one fault handler registered for each device */
1089 	if (param->fault_param) {
1090 		ret = -EBUSY;
1091 		goto done_unlock;
1092 	}
1093 
1094 	get_device(dev);
1095 	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1096 	if (!param->fault_param) {
1097 		put_device(dev);
1098 		ret = -ENOMEM;
1099 		goto done_unlock;
1100 	}
1101 	param->fault_param->handler = handler;
1102 	param->fault_param->data = data;
1103 	mutex_init(&param->fault_param->lock);
1104 	INIT_LIST_HEAD(&param->fault_param->faults);
1105 
1106 done_unlock:
1107 	mutex_unlock(&param->lock);
1108 
1109 	return ret;
1110 }
1111 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
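
/*
 * Fault-handler sketch (hypothetical consumer, not part of this file).
 * Recoverable page requests are completed through iommu_page_response();
 * a real consumer would service the request before answering SUCCESS.
 */
static int mydrv_dev_fault(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;	/* assumes @data was the device */

	if (fault->type == IOMMU_FAULT_PAGE_REQ) {
		struct iommu_page_response resp = {
			.version = IOMMU_PAGE_RESP_VERSION_1,
			.grpid	 = fault->prm.grpid,
			.code	 = IOMMU_PAGE_RESP_INVALID,
		};

		if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
			resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
			resp.pasid = fault->prm.pasid;
		}
		return iommu_page_response(dev, &resp);
	}

	dev_err(dev, "unrecoverable fault, reason %u\n", fault->event.reason);
	return 0;
}

/* Registered with: iommu_register_device_fault_handler(dev, mydrv_dev_fault, dev); */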
1112 
1113 /**
1114  * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1115  * @dev: the device
1116  *
1117  * Remove the device fault handler installed with
1118  * iommu_register_device_fault_handler().
1119  *
1120  * Return 0 on success, or an error.
1121  */
1122 int iommu_unregister_device_fault_handler(struct device *dev)
1123 {
1124 	struct dev_iommu *param = dev->iommu;
1125 	int ret = 0;
1126 
1127 	if (!param)
1128 		return -EINVAL;
1129 
1130 	mutex_lock(&param->lock);
1131 
1132 	if (!param->fault_param)
1133 		goto unlock;
1134 
1135 	/* we cannot unregister the handler while there are pending faults */
1136 	if (!list_empty(&param->fault_param->faults)) {
1137 		ret = -EBUSY;
1138 		goto unlock;
1139 	}
1140 
1141 	kfree(param->fault_param);
1142 	param->fault_param = NULL;
1143 	put_device(dev);
1144 unlock:
1145 	mutex_unlock(&param->lock);
1146 
1147 	return ret;
1148 }
1149 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1150 
1151 /**
1152  * iommu_report_device_fault() - Report fault event to device driver
1153  * @dev: the device
1154  * @evt: fault event data
1155  *
1156  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1157  * handler. When this function fails and the fault is recoverable, it is the
1158  * caller's responsibility to complete the fault.
1159  *
1160  * Return 0 on success, or an error.
1161  */
1162 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1163 {
1164 	struct dev_iommu *param = dev->iommu;
1165 	struct iommu_fault_event *evt_pending = NULL;
1166 	struct iommu_fault_param *fparam;
1167 	int ret = 0;
1168 
1169 	if (!param || !evt)
1170 		return -EINVAL;
1171 
1172 	/* we only report device fault if there is a handler registered */
1173 	mutex_lock(&param->lock);
1174 	fparam = param->fault_param;
1175 	if (!fparam || !fparam->handler) {
1176 		ret = -EINVAL;
1177 		goto done_unlock;
1178 	}
1179 
1180 	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1181 	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1182 		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1183 				      GFP_KERNEL);
1184 		if (!evt_pending) {
1185 			ret = -ENOMEM;
1186 			goto done_unlock;
1187 		}
1188 		mutex_lock(&fparam->lock);
1189 		list_add_tail(&evt_pending->list, &fparam->faults);
1190 		mutex_unlock(&fparam->lock);
1191 	}
1192 
1193 	ret = fparam->handler(&evt->fault, fparam->data);
1194 	if (ret && evt_pending) {
1195 		mutex_lock(&fparam->lock);
1196 		list_del(&evt_pending->list);
1197 		mutex_unlock(&fparam->lock);
1198 		kfree(evt_pending);
1199 	}
1200 done_unlock:
1201 	mutex_unlock(&param->lock);
1202 	return ret;
1203 }
1204 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1205 
1206 int iommu_page_response(struct device *dev,
1207 			struct iommu_page_response *msg)
1208 {
1209 	bool needs_pasid;
1210 	int ret = -EINVAL;
1211 	struct iommu_fault_event *evt;
1212 	struct iommu_fault_page_request *prm;
1213 	struct dev_iommu *param = dev->iommu;
1214 	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
1215 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1216 
1217 	if (!domain || !domain->ops->page_response)
1218 		return -ENODEV;
1219 
1220 	if (!param || !param->fault_param)
1221 		return -EINVAL;
1222 
1223 	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1224 	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1225 		return -EINVAL;
1226 
1227 	/* Only send response if there is a fault report pending */
1228 	mutex_lock(&param->fault_param->lock);
1229 	if (list_empty(&param->fault_param->faults)) {
1230 		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1231 		goto done_unlock;
1232 	}
1233 	/*
1234 	 * Check if we have a matching page request pending to respond,
1235 	 * otherwise return -EINVAL
1236 	 */
1237 	list_for_each_entry(evt, &param->fault_param->faults, list) {
1238 		prm = &evt->fault.prm;
1239 		if (prm->grpid != msg->grpid)
1240 			continue;
1241 
1242 		/*
1243 		 * If the PASID is required, the corresponding request is
1244 		 * matched using the group ID, the PASID valid bit and the PASID
1245 		 * value. Otherwise only the group ID matches request and
1246 		 * response.
1247 		 */
1248 		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
1249 		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
1250 			continue;
1251 
1252 		if (!needs_pasid && has_pasid) {
1253 			/* No big deal, just clear it. */
1254 			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
1255 			msg->pasid = 0;
1256 		}
1257 
1258 		ret = domain->ops->page_response(dev, evt, msg);
1259 		list_del(&evt->list);
1260 		kfree(evt);
1261 		break;
1262 	}
1263 
1264 done_unlock:
1265 	mutex_unlock(&param->fault_param->lock);
1266 	return ret;
1267 }
1268 EXPORT_SYMBOL_GPL(iommu_page_response);
1269 
1270 /**
1271  * iommu_group_id - Return ID for a group
1272  * @group: the group to ID
1273  *
1274  * Return the unique ID for the group matching the sysfs group number.
1275  */
1276 int iommu_group_id(struct iommu_group *group)
1277 {
1278 	return group->id;
1279 }
1280 EXPORT_SYMBOL_GPL(iommu_group_id);
1281 
1282 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1283 					       unsigned long *devfns);
1284 
1285 /*
1286  * To consider a PCI device isolated, we require ACS to support Source
1287  * Validation, Request Redirection, Completer Redirection, and Upstream
1288  * Forwarding.  This effectively means that devices cannot spoof their
1289  * requester ID, requests and completions cannot be redirected, and all
1290  * transactions are forwarded upstream, even as they pass through a
1291  * bridge where the target device is downstream.
1292  */
1293 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1294 
1295 /*
1296  * For multifunction devices which are not isolated from each other, find
1297  * all the other non-isolated functions and look for existing groups.  For
1298  * each function, we also need to look for aliases to or from other devices
1299  * that may already have a group.
1300  */
1301 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1302 							unsigned long *devfns)
1303 {
1304 	struct pci_dev *tmp = NULL;
1305 	struct iommu_group *group;
1306 
1307 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1308 		return NULL;
1309 
1310 	for_each_pci_dev(tmp) {
1311 		if (tmp == pdev || tmp->bus != pdev->bus ||
1312 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1313 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1314 			continue;
1315 
1316 		group = get_pci_alias_group(tmp, devfns);
1317 		if (group) {
1318 			pci_dev_put(tmp);
1319 			return group;
1320 		}
1321 	}
1322 
1323 	return NULL;
1324 }
1325 
1326 /*
1327  * Look for aliases to or from the given device for existing groups. DMA
1328  * aliases are only supported on the same bus, therefore the search
1329  * space is quite small (especially since we're really only looking at PCIe
1330  * devices, and therefore only expect multiple slots on the root complex or
1331  * downstream switch ports).  It's conceivable though that a pair of
1332  * multifunction devices could have aliases between them that would cause a
1333  * loop.  To prevent this, we use a bitmap to track where we've been.
1334  */
1335 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1336 					       unsigned long *devfns)
1337 {
1338 	struct pci_dev *tmp = NULL;
1339 	struct iommu_group *group;
1340 
1341 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1342 		return NULL;
1343 
1344 	group = iommu_group_get(&pdev->dev);
1345 	if (group)
1346 		return group;
1347 
1348 	for_each_pci_dev(tmp) {
1349 		if (tmp == pdev || tmp->bus != pdev->bus)
1350 			continue;
1351 
1352 		/* We alias them or they alias us */
1353 		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1354 			group = get_pci_alias_group(tmp, devfns);
1355 			if (group) {
1356 				pci_dev_put(tmp);
1357 				return group;
1358 			}
1359 
1360 			group = get_pci_function_alias_group(tmp, devfns);
1361 			if (group) {
1362 				pci_dev_put(tmp);
1363 				return group;
1364 			}
1365 		}
1366 	}
1367 
1368 	return NULL;
1369 }
1370 
1371 struct group_for_pci_data {
1372 	struct pci_dev *pdev;
1373 	struct iommu_group *group;
1374 };
1375 
1376 /*
1377  * DMA alias iterator callback, return the last seen device.  Stop and return
1378  * the IOMMU group if we find one along the way.
1379  */
1380 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1381 {
1382 	struct group_for_pci_data *data = opaque;
1383 
1384 	data->pdev = pdev;
1385 	data->group = iommu_group_get(&pdev->dev);
1386 
1387 	return data->group != NULL;
1388 }
1389 
1390 /*
1391  * Generic device_group call-back function. It just allocates one
1392  * iommu-group per device.
1393  */
1394 struct iommu_group *generic_device_group(struct device *dev)
1395 {
1396 	return iommu_group_alloc();
1397 }
1398 EXPORT_SYMBOL_GPL(generic_device_group);
1399 
1400 /*
1401  * Use standard PCI bus topology, isolation features, and DMA alias quirks
1402  * to find or create an IOMMU group for a device.
1403  */
1404 struct iommu_group *pci_device_group(struct device *dev)
1405 {
1406 	struct pci_dev *pdev = to_pci_dev(dev);
1407 	struct group_for_pci_data data;
1408 	struct pci_bus *bus;
1409 	struct iommu_group *group = NULL;
1410 	u64 devfns[4] = { 0 };
1411 
1412 	if (WARN_ON(!dev_is_pci(dev)))
1413 		return ERR_PTR(-EINVAL);
1414 
1415 	/*
1416 	 * Find the upstream DMA alias for the device.  A device must not
1417 	 * be aliased due to topology in order to have its own IOMMU group.
1418 	 * If we find an alias along the way that already belongs to a
1419 	 * group, use it.
1420 	 */
1421 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1422 		return data.group;
1423 
1424 	pdev = data.pdev;
1425 
1426 	/*
1427 	 * Continue upstream from the point of minimum IOMMU granularity
1428 	 * due to aliases to the point where devices are protected from
1429 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1430 	 * group, use it.
1431 	 */
1432 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1433 		if (!bus->self)
1434 			continue;
1435 
1436 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1437 			break;
1438 
1439 		pdev = bus->self;
1440 
1441 		group = iommu_group_get(&pdev->dev);
1442 		if (group)
1443 			return group;
1444 	}
1445 
1446 	/*
1447 	 * Look for existing groups on device aliases.  If we alias another
1448 	 * device or another device aliases us, use the same group.
1449 	 */
1450 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1451 	if (group)
1452 		return group;
1453 
1454 	/*
1455 	 * Look for existing groups on non-isolated functions on the same
1456  * slot and aliases of those functions, if any.  No need to clear
1457 	 * the search bitmap, the tested devfns are still valid.
1458 	 */
1459 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1460 	if (group)
1461 		return group;
1462 
1463 	/* No shared group found, allocate new */
1464 	return iommu_group_alloc();
1465 }
1466 EXPORT_SYMBOL_GPL(pci_device_group);
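
/*
 * Wiring sketch (hypothetical driver, not part of this file): drivers
 * normally plug these helpers into their iommu_ops ->device_group()
 * callback rather than open-coding group construction.
 */
static struct iommu_group *mydrv_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	if (dev_is_fsl_mc(dev))
		return fsl_mc_device_group(dev);

	return generic_device_group(dev);
}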
1467 
1468 /* Get the IOMMU group for device on fsl-mc bus */
1469 struct iommu_group *fsl_mc_device_group(struct device *dev)
1470 {
1471 	struct device *cont_dev = fsl_mc_cont_dev(dev);
1472 	struct iommu_group *group;
1473 
1474 	group = iommu_group_get(cont_dev);
1475 	if (!group)
1476 		group = iommu_group_alloc();
1477 	return group;
1478 }
1479 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1480 
1481 static int iommu_get_def_domain_type(struct device *dev)
1482 {
1483 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1484 
1485 	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1486 		return IOMMU_DOMAIN_DMA;
1487 
1488 	if (ops->def_domain_type)
1489 		return ops->def_domain_type(dev);
1490 
1491 	return 0;
1492 }
1493 
1494 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1495 					    struct iommu_group *group,
1496 					    unsigned int type)
1497 {
1498 	struct iommu_domain *dom;
1499 
1500 	dom = __iommu_domain_alloc(bus, type);
1501 	if (!dom && type != IOMMU_DOMAIN_DMA) {
1502 		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1503 		if (dom)
1504 			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1505 				type, group->name);
1506 	}
1507 
1508 	if (!dom)
1509 		return -ENOMEM;
1510 
1511 	group->default_domain = dom;
1512 	if (!group->domain)
1513 		group->domain = dom;
1514 
1515 	if (!iommu_dma_strict) {
1516 		int attr = 1;
1517 		iommu_domain_set_attr(dom,
1518 				      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1519 				      &attr);
1520 	}
1521 
1522 	return 0;
1523 }
1524 
1525 static int iommu_alloc_default_domain(struct iommu_group *group,
1526 				      struct device *dev)
1527 {
1528 	unsigned int type;
1529 
1530 	if (group->default_domain)
1531 		return 0;
1532 
1533 	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1534 
1535 	return iommu_group_alloc_default_domain(dev->bus, group, type);
1536 }
1537 
1538 /**
1539  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1540  * @dev: target device
1541  *
1542  * This function is intended to be called by IOMMU drivers and extended to
1543  * support common, bus-defined algorithms when determining or creating the
1544  * IOMMU group for a device.  On success, the caller will hold a reference
1545  * to the returned IOMMU group, which will already include the provided
1546  * device.  The reference should be released with iommu_group_put().
1547  */
1548 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1549 {
1550 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1551 	struct iommu_group *group;
1552 	int ret;
1553 
1554 	group = iommu_group_get(dev);
1555 	if (group)
1556 		return group;
1557 
1558 	if (!ops)
1559 		return ERR_PTR(-EINVAL);
1560 
1561 	group = ops->device_group(dev);
1562 	if (WARN_ON_ONCE(group == NULL))
1563 		return ERR_PTR(-EINVAL);
1564 
1565 	if (IS_ERR(group))
1566 		return group;
1567 
1568 	ret = iommu_group_add_device(group, dev);
1569 	if (ret)
1570 		goto out_put_group;
1571 
1572 	return group;
1573 
1574 out_put_group:
1575 	iommu_group_put(group);
1576 
1577 	return ERR_PTR(ret);
1578 }
1579 
1580 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1581 {
1582 	return group->default_domain;
1583 }
1584 
1585 static int probe_iommu_group(struct device *dev, void *data)
1586 {
1587 	struct list_head *group_list = data;
1588 	struct iommu_group *group;
1589 	int ret;
1590 
1591 	/* Device is probed already if in a group */
1592 	group = iommu_group_get(dev);
1593 	if (group) {
1594 		iommu_group_put(group);
1595 		return 0;
1596 	}
1597 
1598 	ret = __iommu_probe_device(dev, group_list);
1599 	if (ret == -ENODEV)
1600 		ret = 0;
1601 
1602 	return ret;
1603 }
1604 
1605 static int remove_iommu_group(struct device *dev, void *data)
1606 {
1607 	iommu_release_device(dev);
1608 
1609 	return 0;
1610 }
1611 
1612 static int iommu_bus_notifier(struct notifier_block *nb,
1613 			      unsigned long action, void *data)
1614 {
1615 	unsigned long group_action = 0;
1616 	struct device *dev = data;
1617 	struct iommu_group *group;
1618 
1619 	/*
1620 	 * ADD/DEL call into iommu driver ops if provided, which may
1621 	 * result in ADD/DEL notifiers to group->notifier
1622 	 */
1623 	if (action == BUS_NOTIFY_ADD_DEVICE) {
1624 		int ret;
1625 
1626 		ret = iommu_probe_device(dev);
1627 		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1628 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1629 		iommu_release_device(dev);
1630 		return NOTIFY_OK;
1631 	}
1632 
1633 	/*
1634 	 * Remaining BUS_NOTIFYs get filtered and republished to the
1635 	 * group, if anyone is listening
1636 	 */
1637 	group = iommu_group_get(dev);
1638 	if (!group)
1639 		return 0;
1640 
1641 	switch (action) {
1642 	case BUS_NOTIFY_BIND_DRIVER:
1643 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1644 		break;
1645 	case BUS_NOTIFY_BOUND_DRIVER:
1646 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1647 		break;
1648 	case BUS_NOTIFY_UNBIND_DRIVER:
1649 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1650 		break;
1651 	case BUS_NOTIFY_UNBOUND_DRIVER:
1652 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1653 		break;
1654 	}
1655 
1656 	if (group_action)
1657 		blocking_notifier_call_chain(&group->notifier,
1658 					     group_action, dev);
1659 
1660 	iommu_group_put(group);
1661 	return 0;
1662 }
1663 
1664 struct __group_domain_type {
1665 	struct device *dev;
1666 	unsigned int type;
1667 };
1668 
1669 static int probe_get_default_domain_type(struct device *dev, void *data)
1670 {
1671 	struct __group_domain_type *gtype = data;
1672 	unsigned int type = iommu_get_def_domain_type(dev);
1673 
1674 	if (type) {
1675 		if (gtype->type && gtype->type != type) {
1676 			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1677 				 iommu_domain_type_str(type),
1678 				 dev_name(gtype->dev),
1679 				 iommu_domain_type_str(gtype->type));
1680 			gtype->type = 0;
1681 		}
1682 
1683 		if (!gtype->dev) {
1684 			gtype->dev  = dev;
1685 			gtype->type = type;
1686 		}
1687 	}
1688 
1689 	return 0;
1690 }
1691 
1692 static void probe_alloc_default_domain(struct bus_type *bus,
1693 				       struct iommu_group *group)
1694 {
1695 	struct __group_domain_type gtype;
1696 
1697 	memset(&gtype, 0, sizeof(gtype));
1698 
1699 	/* Ask for default domain requirements of all devices in the group */
1700 	__iommu_group_for_each_dev(group, &gtype,
1701 				   probe_get_default_domain_type);
1702 
1703 	if (!gtype.type)
1704 		gtype.type = iommu_def_domain_type;
1705 
1706 	iommu_group_alloc_default_domain(bus, group, gtype.type);
1707 
1708 }
1709 
1710 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1711 {
1712 	struct iommu_domain *domain = data;
1713 	int ret = 0;
1714 
1715 	if (!iommu_is_attach_deferred(domain, dev))
1716 		ret = __iommu_attach_device(domain, dev);
1717 
1718 	return ret;
1719 }
1720 
1721 static int __iommu_group_dma_attach(struct iommu_group *group)
1722 {
1723 	return __iommu_group_for_each_dev(group, group->default_domain,
1724 					  iommu_group_do_dma_attach);
1725 }
1726 
1727 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1728 {
1729 	struct iommu_domain *domain = data;
1730 
1731 	if (domain->ops->probe_finalize)
1732 		domain->ops->probe_finalize(dev);
1733 
1734 	return 0;
1735 }
1736 
1737 static void __iommu_group_dma_finalize(struct iommu_group *group)
1738 {
1739 	__iommu_group_for_each_dev(group, group->default_domain,
1740 				   iommu_group_do_probe_finalize);
1741 }
1742 
1743 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1744 {
1745 	struct iommu_group *group = data;
1746 
1747 	iommu_create_device_direct_mappings(group, dev);
1748 
1749 	return 0;
1750 }
1751 
1752 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1753 {
1754 	return __iommu_group_for_each_dev(group, group,
1755 					  iommu_do_create_direct_mappings);
1756 }
1757 
1758 int bus_iommu_probe(struct bus_type *bus)
1759 {
1760 	struct iommu_group *group, *next;
1761 	LIST_HEAD(group_list);
1762 	int ret;
1763 
1764 	/*
1765 	 * This code-path does not allocate the default domain when
1766 	 * creating the iommu group, so do it after the groups are
1767 	 * created.
1768 	 */
1769 	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1770 	if (ret)
1771 		return ret;
1772 
1773 	list_for_each_entry_safe(group, next, &group_list, entry) {
1774 		/* Remove item from the list */
1775 		list_del_init(&group->entry);
1776 
1777 		mutex_lock(&group->mutex);
1778 
1779 		/* Try to allocate default domain */
1780 		probe_alloc_default_domain(bus, group);
1781 
1782 		if (!group->default_domain) {
1783 			mutex_unlock(&group->mutex);
1784 			continue;
1785 		}
1786 
1787 		iommu_group_create_direct_mappings(group);
1788 
1789 		ret = __iommu_group_dma_attach(group);
1790 
1791 		mutex_unlock(&group->mutex);
1792 
1793 		if (ret)
1794 			break;
1795 
1796 		__iommu_group_dma_finalize(group);
1797 	}
1798 
1799 	return ret;
1800 }
1801 
1802 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1803 {
1804 	struct notifier_block *nb;
1805 	int err;
1806 
1807 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1808 	if (!nb)
1809 		return -ENOMEM;
1810 
1811 	nb->notifier_call = iommu_bus_notifier;
1812 
1813 	err = bus_register_notifier(bus, nb);
1814 	if (err)
1815 		goto out_free;
1816 
1817 	err = bus_iommu_probe(bus);
1818 	if (err)
1819 		goto out_err;
1820 
1821 
1822 	return 0;
1823 
1824 out_err:
1825 	/* Clean up */
1826 	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1827 	bus_unregister_notifier(bus, nb);
1828 
1829 out_free:
1830 	kfree(nb);
1831 
1832 	return err;
1833 }
1834 
1835 /**
1836  * bus_set_iommu - set iommu-callbacks for the bus
1837  * @bus: bus.
1838  * @ops: the callbacks provided by the iommu-driver
1839  *
1840  * This function is called by an iommu driver to set the iommu methods
1841  * used for a particular bus. Drivers for devices on that bus can use
1842  * the iommu-api after these ops are registered.
1843  * This special function is needed because IOMMUs are usually devices on
1844  * the bus itself, so the iommu drivers are not initialized when the bus
1845  * is set up. With this function the iommu-driver can set the iommu-ops
1846  * afterwards.
1847  */
1848 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1849 {
1850 	int err;
1851 
1852 	if (ops == NULL) {
1853 		bus->iommu_ops = NULL;
1854 		return 0;
1855 	}
1856 
1857 	if (bus->iommu_ops != NULL)
1858 		return -EBUSY;
1859 
1860 	bus->iommu_ops = ops;
1861 
1862 	/* Do IOMMU specific setup for this bus-type */
1863 	err = iommu_bus_init(bus, ops);
1864 	if (err)
1865 		bus->iommu_ops = NULL;
1866 
1867 	return err;
1868 }
1869 EXPORT_SYMBOL_GPL(bus_set_iommu);
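
/*
 * Registration sketch (hypothetical driver init, not part of this file;
 * mydrv_iommu_ops is an assumed, fully populated struct iommu_ops).
 */
static const struct iommu_ops mydrv_iommu_ops;	/* assumption: defined elsewhere */

static int __init mydrv_iommu_init(void)
{
	if (iommu_present(&pci_bus_type))
		return 0;	/* another driver already claimed the bus */

	return bus_set_iommu(&pci_bus_type, &mydrv_iommu_ops);
}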
1870 
1871 bool iommu_present(struct bus_type *bus)
1872 {
1873 	return bus->iommu_ops != NULL;
1874 }
1875 EXPORT_SYMBOL_GPL(iommu_present);
1876 
1877 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1878 {
1879 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1880 		return false;
1881 
1882 	return bus->iommu_ops->capable(cap);
1883 }
1884 EXPORT_SYMBOL_GPL(iommu_capable);
1885 
1886 /**
1887  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1888  * @domain: iommu domain
1889  * @handler: fault handler
1890  * @token: user data, will be passed back to the fault handler
1891  *
1892  * This function should be used by IOMMU users which want to be notified
1893  * whenever an IOMMU fault happens.
1894  *
1895  * The fault handler itself should return 0 on success, and an appropriate
1896  * error code otherwise.
1897  */
1898 void iommu_set_fault_handler(struct iommu_domain *domain,
1899 					iommu_fault_handler_t handler,
1900 					void *token)
1901 {
1902 	BUG_ON(!domain);
1903 
1904 	domain->handler = handler;
1905 	domain->handler_token = token;
1906 }
1907 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
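
/*
 * Handler sketch (hypothetical domain owner, not part of this file).
 * By convention, returning -ENOSYS indicates the fault was not handled,
 * so the caller of report_iommu_fault() falls back to its own default
 * reporting; returning 0 claims the fault as handled.
 */
static int mydrv_domain_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags, void *token)
{
	dev_err(dev, "unexpected %s fault at iova 0x%lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

	return -ENOSYS;
}

/* Installed with: iommu_set_fault_handler(domain, mydrv_domain_fault, NULL); */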
1908 
1909 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1910 						 unsigned type)
1911 {
1912 	struct iommu_domain *domain;
1913 
1914 	if (bus == NULL || bus->iommu_ops == NULL)
1915 		return NULL;
1916 
1917 	domain = bus->iommu_ops->domain_alloc(type);
1918 	if (!domain)
1919 		return NULL;
1920 
1921 	domain->ops  = bus->iommu_ops;
1922 	domain->type = type;
1923 	/* Assume all sizes by default; the driver may override this later */
1924 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1925 
1926 	return domain;
1927 }
1928 
1929 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1930 {
1931 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1932 }
1933 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1934 
1935 void iommu_domain_free(struct iommu_domain *domain)
1936 {
1937 	domain->ops->domain_free(domain);
1938 }
1939 EXPORT_SYMBOL_GPL(iommu_domain_free);
1940 
1941 static int __iommu_attach_device(struct iommu_domain *domain,
1942 				 struct device *dev)
1943 {
1944 	int ret;
1945 
1946 	if (unlikely(domain->ops->attach_dev == NULL))
1947 		return -ENODEV;
1948 
1949 	ret = domain->ops->attach_dev(domain, dev);
1950 	if (!ret)
1951 		trace_attach_device_to_domain(dev);
1952 	return ret;
1953 }
1954 
1955 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1956 {
1957 	struct iommu_group *group;
1958 	int ret;
1959 
1960 	group = iommu_group_get(dev);
1961 	if (!group)
1962 		return -ENODEV;
1963 
1964 	/*
1965 	 * Lock the group to make sure the device-count doesn't
1966 	 * change while we are attaching
1967 	 */
1968 	mutex_lock(&group->mutex);
1969 	ret = -EINVAL;
1970 	if (iommu_group_device_count(group) != 1)
1971 		goto out_unlock;
1972 
1973 	ret = __iommu_attach_group(domain, group);
1974 
1975 out_unlock:
1976 	mutex_unlock(&group->mutex);
1977 	iommu_group_put(group);
1978 
1979 	return ret;
1980 }
1981 EXPORT_SYMBOL_GPL(iommu_attach_device);
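
/*
 * Example (illustrative sketch, error handling elided): the typical
 * unmanaged-domain life cycle for a device in a single-device group:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_attach_device(domain, dev);
 *
 *	...use iommu_map()/iommu_unmap() on the domain...
 *
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */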
1982 
1983 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
1984 {
1985 	const struct iommu_ops *ops = domain->ops;
1986 
1987 	if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
1988 		return __iommu_attach_device(domain, dev);
1989 
1990 	return 0;
1991 }
1992 
1993 /*
1994  * Check flags and other user provided data for valid combinations. We also
1995  * make sure no reserved fields or unused flags are set. This is to ensure
1996  * not breaking userspace in the future when these fields or flags are used.
1997  */
1998 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
1999 {
2000 	u32 mask;
2001 	int i;
2002 
2003 	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
2004 		return -EINVAL;
2005 
2006 	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
2007 	if (info->cache & ~mask)
2008 		return -EINVAL;
2009 
2010 	if (info->granularity >= IOMMU_INV_GRANU_NR)
2011 		return -EINVAL;
2012 
2013 	switch (info->granularity) {
2014 	case IOMMU_INV_GRANU_ADDR:
2015 		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
2016 			return -EINVAL;
2017 
2018 		mask = IOMMU_INV_ADDR_FLAGS_PASID |
2019 			IOMMU_INV_ADDR_FLAGS_ARCHID |
2020 			IOMMU_INV_ADDR_FLAGS_LEAF;
2021 
2022 		if (info->granu.addr_info.flags & ~mask)
2023 			return -EINVAL;
2024 		break;
2025 	case IOMMU_INV_GRANU_PASID:
2026 		mask = IOMMU_INV_PASID_FLAGS_PASID |
2027 			IOMMU_INV_PASID_FLAGS_ARCHID;
2028 		if (info->granu.pasid_info.flags & ~mask)
2029 			return -EINVAL;
2030 
2031 		break;
2032 	case IOMMU_INV_GRANU_DOMAIN:
2033 		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
2034 			return -EINVAL;
2035 		break;
2036 	default:
2037 		return -EINVAL;
2038 	}
2039 
2040 	/* Check reserved padding fields */
2041 	for (i = 0; i < sizeof(info->padding); i++) {
2042 		if (info->padding[i])
2043 			return -EINVAL;
2044 	}
2045 
2046 	return 0;
2047 }
2048 
2049 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2050 				void __user *uinfo)
2051 {
2052 	struct iommu_cache_invalidate_info inv_info = { 0 };
2053 	u32 minsz;
2054 	int ret;
2055 
2056 	if (unlikely(!domain->ops->cache_invalidate))
2057 		return -ENODEV;
2058 
2059 	/*
2060 	 * No new fields can be added before the variable sized union; the
2061 	 * minimum size is the offset to the union.
2062 	 */
2063 	minsz = offsetof(struct iommu_cache_invalidate_info, granu);
2064 
2065 	/* Copy minsz from user to get flags and argsz */
2066 	if (copy_from_user(&inv_info, uinfo, minsz))
2067 		return -EFAULT;
2068 
2069 	/* Fields before the variable size union are mandatory */
2070 	if (inv_info.argsz < minsz)
2071 		return -EINVAL;
2072 
2073 	/* PASID and address granu require additional info beyond minsz */
2074 	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
2075 	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
2076 		return -EINVAL;
2077 
2078 	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
2079 	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
2080 		return -EINVAL;
2081 
2082 	/*
2083 	 * The user might be using a newer UAPI header which has a larger data
2084 	 * size; we shall support the existing flags within the current
2085 	 * size. Copy the remaining user data _after_ minsz but not more
2086 	 * than the current kernel supported size.
2087 	 */
2088 	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
2089 			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
2090 		return -EFAULT;
2091 
2092 	/* Now the argsz is validated, check the content */
2093 	ret = iommu_check_cache_invl_data(&inv_info);
2094 	if (ret)
2095 		return ret;
2096 
2097 	return domain->ops->cache_invalidate(domain, dev, &inv_info);
2098 }
2099 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
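
/*
 * Example (hedged sketch of the expected user data): userspace sets
 * argsz to the size it knows about; e.g. for a domain-selective IOTLB
 * invalidation it might pass:
 *
 *	struct iommu_cache_invalidate_info info = {
 *		.argsz		= sizeof(info),
 *		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
 *		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
 *		.granularity	= IOMMU_INV_GRANU_DOMAIN,
 *	};
 */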
2100 
2101 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
2102 {
2103 	u64 mask;
2104 	int i;
2105 
2106 	if (data->version != IOMMU_GPASID_BIND_VERSION_1)
2107 		return -EINVAL;
2108 
2109 	/* Check the range of supported formats */
2110 	if (data->format >= IOMMU_PASID_FORMAT_LAST)
2111 		return -EINVAL;
2112 
2113 	/* Check all flags */
2114 	mask = IOMMU_SVA_GPASID_VAL;
2115 	if (data->flags & ~mask)
2116 		return -EINVAL;
2117 
2118 	/* Check reserved padding fields */
2119 	for (i = 0; i < sizeof(data->padding); i++) {
2120 		if (data->padding[i])
2121 			return -EINVAL;
2122 	}
2123 
2124 	return 0;
2125 }
2126 
2127 static int iommu_sva_prepare_bind_data(void __user *udata,
2128 				       struct iommu_gpasid_bind_data *data)
2129 {
2130 	u32 minsz;
2131 
2132 	/*
2133 	 * No new fields can be added before the variable sized union; the
2134 	 * minimum size is the offset to the union.
2135 	 */
2136 	minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
2137 
2138 	/* Copy minsz from user to get flags and argsz */
2139 	if (copy_from_user(data, udata, minsz))
2140 		return -EFAULT;
2141 
2142 	/* Fields before the variable size union are mandatory */
2143 	if (data->argsz < minsz)
2144 		return -EINVAL;
2145 	/*
2146 	 * The user might be using a newer UAPI header; we shall let the IOMMU
2147 	 * vendor driver decide on the size it needs. Since the guest PASID bind
2148 	 * data can be vendor specific, a larger argsz could be the result of an
2149 	 * extension for one vendor without affecting another vendor.
2150 	 * Copy the remaining user data _after_ minsz.
2151 	 */
2152 	if (copy_from_user((void *)data + minsz, udata + minsz,
2153 			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
2154 		return -EFAULT;
2155 
2156 	return iommu_check_bind_data(data);
2157 }
2158 
2159 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2160 			       void __user *udata)
2161 {
2162 	struct iommu_gpasid_bind_data data = { 0 };
2163 	int ret;
2164 
2165 	if (unlikely(!domain->ops->sva_bind_gpasid))
2166 		return -ENODEV;
2167 
2168 	ret = iommu_sva_prepare_bind_data(udata, &data);
2169 	if (ret)
2170 		return ret;
2171 
2172 	return domain->ops->sva_bind_gpasid(domain, dev, &data);
2173 }
2174 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2175 
2176 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2177 			     ioasid_t pasid)
2178 {
2179 	if (unlikely(!domain->ops->sva_unbind_gpasid))
2180 		return -ENODEV;
2181 
2182 	return domain->ops->sva_unbind_gpasid(dev, pasid);
2183 }
2184 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2185 
2186 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2187 				 void __user *udata)
2188 {
2189 	struct iommu_gpasid_bind_data data = { 0 };
2190 	int ret;
2191 
2192 	if (unlikely(!domain->ops->sva_bind_gpasid))
2193 		return -ENODEV;
2194 
2195 	ret = iommu_sva_prepare_bind_data(udata, &data);
2196 	if (ret)
2197 		return ret;
2198 
2199 	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2200 }
2201 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2202 
2203 static void __iommu_detach_device(struct iommu_domain *domain,
2204 				  struct device *dev)
2205 {
2206 	if (iommu_is_attach_deferred(domain, dev))
2207 		return;
2208 
2209 	if (unlikely(domain->ops->detach_dev == NULL))
2210 		return;
2211 
2212 	domain->ops->detach_dev(domain, dev);
2213 	trace_detach_device_from_domain(dev);
2214 }
2215 
2216 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2217 {
2218 	struct iommu_group *group;
2219 
2220 	group = iommu_group_get(dev);
2221 	if (!group)
2222 		return;
2223 
2224 	mutex_lock(&group->mutex);
2225 	if (iommu_group_device_count(group) != 1) {
2226 		WARN_ON(1);
2227 		goto out_unlock;
2228 	}
2229 
2230 	__iommu_detach_group(domain, group);
2231 
2232 out_unlock:
2233 	mutex_unlock(&group->mutex);
2234 	iommu_group_put(group);
2235 }
2236 EXPORT_SYMBOL_GPL(iommu_detach_device);
2237 
2238 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2239 {
2240 	struct iommu_domain *domain;
2241 	struct iommu_group *group;
2242 
2243 	group = iommu_group_get(dev);
2244 	if (!group)
2245 		return NULL;
2246 
2247 	domain = group->domain;
2248 
2249 	iommu_group_put(group);
2250 
2251 	return domain;
2252 }
2253 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2254 
2255 /*
2256  * Fast path for IOMMU_DOMAIN_DMA implementations: the caller guarantees that
2257  * the group and its default domain are valid and correct.
2258  */
2259 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2260 {
2261 	return dev->iommu_group->default_domain;
2262 }
2263 
2264 /*
2265  * IOMMU groups are really the natural working unit of the IOMMU, but
2266  * the IOMMU API works on domains and devices.  Bridge that gap by
2267  * iterating over the devices in a group.  Ideally we'd have a single
2268  * device which represents the requestor ID of the group, but we also
2269  * allow IOMMU drivers to create policy defined minimum sets, where
2270  * the physical hardware may be able to distiguish members, but we
2271  * the physical hardware may be able to distinguish members, but we
2272  * wish to group them at a higher level (e.g. untrusted multi-function
2273  */
2274 static int iommu_group_do_attach_device(struct device *dev, void *data)
2275 {
2276 	struct iommu_domain *domain = data;
2277 
2278 	return __iommu_attach_device(domain, dev);
2279 }
2280 
2281 static int __iommu_attach_group(struct iommu_domain *domain,
2282 				struct iommu_group *group)
2283 {
2284 	int ret;
2285 
2286 	if (group->default_domain && group->domain != group->default_domain)
2287 		return -EBUSY;
2288 
2289 	ret = __iommu_group_for_each_dev(group, domain,
2290 					 iommu_group_do_attach_device);
2291 	if (ret == 0)
2292 		group->domain = domain;
2293 
2294 	return ret;
2295 }
2296 
2297 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2298 {
2299 	int ret;
2300 
2301 	mutex_lock(&group->mutex);
2302 	ret = __iommu_attach_group(domain, group);
2303 	mutex_unlock(&group->mutex);
2304 
2305 	return ret;
2306 }
2307 EXPORT_SYMBOL_GPL(iommu_attach_group);
2308 
2309 static int iommu_group_do_detach_device(struct device *dev, void *data)
2310 {
2311 	struct iommu_domain *domain = data;
2312 
2313 	__iommu_detach_device(domain, dev);
2314 
2315 	return 0;
2316 }
2317 
2318 static void __iommu_detach_group(struct iommu_domain *domain,
2319 				 struct iommu_group *group)
2320 {
2321 	int ret;
2322 
2323 	if (!group->default_domain) {
2324 		__iommu_group_for_each_dev(group, domain,
2325 					   iommu_group_do_detach_device);
2326 		group->domain = NULL;
2327 		return;
2328 	}
2329 
2330 	if (group->domain == group->default_domain)
2331 		return;
2332 
2333 	/* Detach by re-attaching to the default domain */
2334 	ret = __iommu_group_for_each_dev(group, group->default_domain,
2335 					 iommu_group_do_attach_device);
2336 	if (ret != 0)
2337 		WARN_ON(1);
2338 	else
2339 		group->domain = group->default_domain;
2340 }
2341 
2342 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2343 {
2344 	mutex_lock(&group->mutex);
2345 	__iommu_detach_group(domain, group);
2346 	mutex_unlock(&group->mutex);
2347 }
2348 EXPORT_SYMBOL_GPL(iommu_detach_group);
2349 
2350 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2351 {
2352 	if (unlikely(domain->ops->iova_to_phys == NULL))
2353 		return 0;
2354 
2355 	return domain->ops->iova_to_phys(domain, iova);
2356 }
2357 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2358 
2359 static size_t iommu_pgsize(struct iommu_domain *domain,
2360 			   unsigned long addr_merge, size_t size)
2361 {
2362 	unsigned int pgsize_idx;
2363 	size_t pgsize;
2364 
2365 	/* Max page size that still fits into 'size' */
2366 	pgsize_idx = __fls(size);
2367 
2368 	/* need to consider alignment requirements ? */
2369 	if (likely(addr_merge)) {
2370 		/* Max page size allowed by address */
2371 		unsigned int align_pgsize_idx = __ffs(addr_merge);
2372 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
2373 	}
2374 
2375 	/* build a mask of acceptable page sizes */
2376 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
2377 
2378 	/* throw away page sizes not supported by the hardware */
2379 	pgsize &= domain->pgsize_bitmap;
2380 
2381 	/* make sure we're still sane */
2382 	BUG_ON(!pgsize);
2383 
2384 	/* pick the biggest page */
2385 	pgsize_idx = __fls(pgsize);
2386 	pgsize = 1UL << pgsize_idx;
2387 
2388 	return pgsize;
2389 }
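
/*
 * Worked example (illustrative): assume pgsize_bitmap = SZ_4K | SZ_2M
 * (0x201000), addr_merge = 0x203000 and size = 0x5000. Then
 * __fls(0x5000) = 14 and __ffs(0x203000) = 12, so pgsize_idx = 12.
 * The size mask (1UL << 13) - 1 = 0x1fff ANDed with the bitmap leaves
 * 0x1000, so a single 4K page is picked for this iteration.
 */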
2390 
2391 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2392 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2393 {
2394 	const struct iommu_ops *ops = domain->ops;
2395 	unsigned long orig_iova = iova;
2396 	unsigned int min_pagesz;
2397 	size_t orig_size = size;
2398 	phys_addr_t orig_paddr = paddr;
2399 	int ret = 0;
2400 
2401 	if (unlikely(ops->map == NULL ||
2402 		     domain->pgsize_bitmap == 0UL))
2403 		return -ENODEV;
2404 
2405 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2406 		return -EINVAL;
2407 
2408 	/* find out the minimum page size supported */
2409 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2410 
2411 	/*
2412 	 * both the virtual address and the physical one, as well as
2413 	 * the size of the mapping, must be aligned (at least) to the
2414 	 * size of the smallest page supported by the hardware
2415 	 */
2416 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2417 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2418 		       iova, &paddr, size, min_pagesz);
2419 		return -EINVAL;
2420 	}
2421 
2422 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2423 
2424 	while (size) {
2425 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2426 
2427 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
2428 			 iova, &paddr, pgsize);
2429 		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2430 
2431 		if (ret)
2432 			break;
2433 
2434 		iova += pgsize;
2435 		paddr += pgsize;
2436 		size -= pgsize;
2437 	}
2438 
2439 	/* unroll mapping in case something went wrong */
2440 	if (ret)
2441 		iommu_unmap(domain, orig_iova, orig_size - size);
2442 	else
2443 		trace_map(orig_iova, orig_paddr, orig_size);
2444 
2445 	return ret;
2446 }
2447 
2448 static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2449 		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2450 {
2451 	const struct iommu_ops *ops = domain->ops;
2452 	int ret;
2453 
2454 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2455 	if (ret == 0 && ops->iotlb_sync_map)
2456 		ops->iotlb_sync_map(domain, iova, size);
2457 
2458 	return ret;
2459 }
2460 
2461 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2462 	      phys_addr_t paddr, size_t size, int prot)
2463 {
2464 	might_sleep();
2465 	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2466 }
2467 EXPORT_SYMBOL_GPL(iommu_map);
2468 
2469 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2470 	      phys_addr_t paddr, size_t size, int prot)
2471 {
2472 	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2473 }
2474 EXPORT_SYMBOL_GPL(iommu_map_atomic);
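
/*
 * Example (illustrative): iova, paddr and size must all be aligned to a
 * page size the hardware supports; with a 4K-capable domain one page
 * could be mapped and unmapped like this:
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 */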
2475 
2476 static size_t __iommu_unmap(struct iommu_domain *domain,
2477 			    unsigned long iova, size_t size,
2478 			    struct iommu_iotlb_gather *iotlb_gather)
2479 {
2480 	const struct iommu_ops *ops = domain->ops;
2481 	size_t unmapped_page, unmapped = 0;
2482 	unsigned long orig_iova = iova;
2483 	unsigned int min_pagesz;
2484 
2485 	if (unlikely(ops->unmap == NULL ||
2486 		     domain->pgsize_bitmap == 0UL))
2487 		return 0;
2488 
2489 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2490 		return 0;
2491 
2492 	/* find out the minimum page size supported */
2493 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2494 
2495 	/*
2496 	 * The virtual address, as well as the size of the mapping, must be
2497 	 * aligned (at least) to the size of the smallest page supported
2498 	 * by the hardware
2499 	 */
2500 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2501 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2502 		       iova, size, min_pagesz);
2503 		return 0;
2504 	}
2505 
2506 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2507 
2508 	/*
2509 	 * Keep iterating until we either unmap 'size' bytes (or more)
2510 	 * or we hit an area that isn't mapped.
2511 	 */
2512 	while (unmapped < size) {
2513 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2514 
2515 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2516 		if (!unmapped_page)
2517 			break;
2518 
2519 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2520 			 iova, unmapped_page);
2521 
2522 		iova += unmapped_page;
2523 		unmapped += unmapped_page;
2524 	}
2525 
2526 	trace_unmap(orig_iova, size, unmapped);
2527 	return unmapped;
2528 }
2529 
2530 size_t iommu_unmap(struct iommu_domain *domain,
2531 		   unsigned long iova, size_t size)
2532 {
2533 	struct iommu_iotlb_gather iotlb_gather;
2534 	size_t ret;
2535 
2536 	iommu_iotlb_gather_init(&iotlb_gather);
2537 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2538 	iommu_iotlb_sync(domain, &iotlb_gather);
2539 
2540 	return ret;
2541 }
2542 EXPORT_SYMBOL_GPL(iommu_unmap);
2543 
2544 size_t iommu_unmap_fast(struct iommu_domain *domain,
2545 			unsigned long iova, size_t size,
2546 			struct iommu_iotlb_gather *iotlb_gather)
2547 {
2548 	return __iommu_unmap(domain, iova, size, iotlb_gather);
2549 }
2550 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2551 
2552 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2553 			     struct scatterlist *sg, unsigned int nents, int prot,
2554 			     gfp_t gfp)
2555 {
2556 	const struct iommu_ops *ops = domain->ops;
2557 	size_t len = 0, mapped = 0;
2558 	phys_addr_t start;
2559 	unsigned int i = 0;
2560 	int ret;
2561 
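	/*
	 * Note: the loop deliberately runs one extra pass (i == nents) with
	 * the last sg entry still in hand, so that the final coalesced run
	 * of physically contiguous entries is flushed out via __iommu_map().
	 */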
2562 	while (i <= nents) {
2563 		phys_addr_t s_phys = sg_phys(sg);
2564 
2565 		if (len && s_phys != start + len) {
2566 			ret = __iommu_map(domain, iova + mapped, start,
2567 					len, prot, gfp);
2568 
2569 			if (ret)
2570 				goto out_err;
2571 
2572 			mapped += len;
2573 			len = 0;
2574 		}
2575 
2576 		if (len) {
2577 			len += sg->length;
2578 		} else {
2579 			len = sg->length;
2580 			start = s_phys;
2581 		}
2582 
2583 		if (++i < nents)
2584 			sg = sg_next(sg);
2585 	}
2586 
2587 	if (ops->iotlb_sync_map)
2588 		ops->iotlb_sync_map(domain, iova, mapped);
2589 	return mapped;
2590 
2591 out_err:
2592 	/* undo mappings already done */
2593 	iommu_unmap(domain, iova, mapped);
2594 
2595 	return 0;
2597 }
2598 
2599 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2600 		    struct scatterlist *sg, unsigned int nents, int prot)
2601 {
2602 	might_sleep();
2603 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2604 }
2605 EXPORT_SYMBOL_GPL(iommu_map_sg);
2606 
2607 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2608 		    struct scatterlist *sg, unsigned int nents, int prot)
2609 {
2610 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2611 }
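
/*
 * Example (hedged sketch): mapping an already-built scatterlist, where
 * 'sgt' is a hypothetical struct sg_table populated by the caller. Note
 * that iommu_map_sg() returns 0 on failure:
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -ENOMEM;
 */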
2612 
2613 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2614 			       phys_addr_t paddr, u64 size, int prot)
2615 {
2616 	if (unlikely(domain->ops->domain_window_enable == NULL))
2617 		return -ENODEV;
2618 
2619 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2620 						 prot);
2621 }
2622 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2623 
2624 /**
2625  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2626  * @domain: the iommu domain where the fault has happened
2627  * @dev: the device where the fault has happened
2628  * @iova: the faulting address
2629  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2630  *
2631  * This function should be called by the low-level IOMMU implementations
2632  * whenever IOMMU faults happen, to allow high-level users, that are
2633  * interested in such events, to know about them.
2634  *
2635  * This event may be useful for several possible use cases:
2636  * - mere logging of the event
2637  * - dynamic TLB/PTE loading
2638  * - restarting the faulting device, if required
2639  *
2640  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2641  * PTE/TLB loading will one day be supported, implementations will be able
2642  * to tell whether it succeeded or not according to this return value).
2643  *
2644  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2645  * (though fault handlers can also return -ENOSYS, in case they want to
2646  * elicit the default behavior of the IOMMU drivers).
2647  */
2648 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2649 		       unsigned long iova, int flags)
2650 {
2651 	int ret = -ENOSYS;
2652 
2653 	/*
2654 	 * if upper layers showed interest and installed a fault handler,
2655 	 * invoke it.
2656 	 */
2657 	if (domain->handler)
2658 		ret = domain->handler(domain, dev, iova, flags,
2659 						domain->handler_token);
2660 
2661 	trace_io_page_fault(dev, iova, flags);
2662 	return ret;
2663 }
2664 EXPORT_SYMBOL_GPL(report_iommu_fault);
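
/*
 * Example (illustrative sketch): a low-level IOMMU driver would call
 * this from its fault interrupt handler; all names here are
 * hypothetical and the register decoding is elided:
 *
 *	static irqreturn_t my_iommu_irq(int irq, void *data)
 *	{
 *		struct my_iommu *iommu = data;
 *
 *		...read iova and flags from the fault registers...
 *		if (report_iommu_fault(iommu->domain, iommu->dev, iova, flags))
 *			dev_err_ratelimited(iommu->dev, "unhandled fault\n");
 *		return IRQ_HANDLED;
 *	}
 */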
2665 
2666 static int __init iommu_init(void)
2667 {
2668 	iommu_group_kset = kset_create_and_add("iommu_groups",
2669 					       NULL, kernel_kobj);
2670 	BUG_ON(!iommu_group_kset);
2671 
2672 	iommu_debugfs_setup();
2673 
2674 	return 0;
2675 }
2676 core_initcall(iommu_init);
2677 
2678 int iommu_domain_get_attr(struct iommu_domain *domain,
2679 			  enum iommu_attr attr, void *data)
2680 {
2681 	struct iommu_domain_geometry *geometry;
2682 	bool *paging;
2683 	int ret = 0;
2684 
2685 	switch (attr) {
2686 	case DOMAIN_ATTR_GEOMETRY:
2687 		geometry  = data;
2688 		*geometry = domain->geometry;
2689 
2690 		break;
2691 	case DOMAIN_ATTR_PAGING:
2692 		paging  = data;
2693 		*paging = (domain->pgsize_bitmap != 0UL);
2694 		break;
2695 	default:
2696 		if (!domain->ops->domain_get_attr)
2697 			return -EINVAL;
2698 
2699 		ret = domain->ops->domain_get_attr(domain, attr, data);
2700 	}
2701 
2702 	return ret;
2703 }
2704 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2705 
2706 int iommu_domain_set_attr(struct iommu_domain *domain,
2707 			  enum iommu_attr attr, void *data)
2708 {
2709 	int ret = 0;
2710 
2711 	switch (attr) {
2712 	default:
2713 		if (domain->ops->domain_set_attr == NULL)
2714 			return -EINVAL;
2715 
2716 		ret = domain->ops->domain_set_attr(domain, attr, data);
2717 	}
2718 
2719 	return ret;
2720 }
2721 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2722 
2723 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2724 {
2725 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2726 
2727 	if (ops && ops->get_resv_regions)
2728 		ops->get_resv_regions(dev, list);
2729 }
2730 
2731 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2732 {
2733 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2734 
2735 	if (ops && ops->put_resv_regions)
2736 		ops->put_resv_regions(dev, list);
2737 }
2738 
2739 /**
2740  * generic_iommu_put_resv_regions - Reserved region driver helper
2741  * @dev: device for which to free reserved regions
2742  * @list: reserved region list for device
2743  *
2744  * IOMMU drivers can use this to implement their .put_resv_regions() callback
2745  * for simple reservations. Memory allocated for each reserved region will be
2746  * freed. If an IOMMU driver allocates additional resources per region, it is
2747  * going to have to implement a custom callback.
2748  */
2749 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2750 {
2751 	struct iommu_resv_region *entry, *next;
2752 
2753 	list_for_each_entry_safe(entry, next, list, list)
2754 		kfree(entry);
2755 }
2756 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2757 
2758 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2759 						  size_t length, int prot,
2760 						  enum iommu_resv_type type)
2761 {
2762 	struct iommu_resv_region *region;
2763 
2764 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2765 	if (!region)
2766 		return NULL;
2767 
2768 	INIT_LIST_HEAD(&region->list);
2769 	region->start = start;
2770 	region->length = length;
2771 	region->prot = prot;
2772 	region->type = type;
2773 	return region;
2774 }
2775 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
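
/*
 * Example (hedged sketch): a driver's ->get_resv_regions() callback
 * might reserve a software MSI window; MY_MSI_BASE and MY_MSI_LENGTH
 * are hypothetical driver constants:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_LENGTH,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */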
2776 
2777 void iommu_set_default_passthrough(bool cmd_line)
2778 {
2779 	if (cmd_line)
2780 		iommu_set_cmd_line_dma_api();
2781 
2782 	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2783 }
2784 
2785 void iommu_set_default_translated(bool cmd_line)
2786 {
2787 	if (cmd_line)
2788 		iommu_set_cmd_line_dma_api();
2789 
2790 	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2791 }
2792 
2793 bool iommu_default_passthrough(void)
2794 {
2795 	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2796 }
2797 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2798 
2799 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2800 {
2801 	const struct iommu_ops *ops = NULL;
2802 	struct iommu_device *iommu;
2803 
2804 	spin_lock(&iommu_device_lock);
2805 	list_for_each_entry(iommu, &iommu_device_list, list)
2806 		if (iommu->fwnode == fwnode) {
2807 			ops = iommu->ops;
2808 			break;
2809 		}
2810 	spin_unlock(&iommu_device_lock);
2811 	return ops;
2812 }
2813 
2814 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2815 		      const struct iommu_ops *ops)
2816 {
2817 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2818 
2819 	if (fwspec)
2820 		return ops == fwspec->ops ? 0 : -EINVAL;
2821 
2822 	if (!dev_iommu_get(dev))
2823 		return -ENOMEM;
2824 
2825 	/* Preallocate for the overwhelmingly common case of 1 ID */
2826 	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2827 	if (!fwspec)
2828 		return -ENOMEM;
2829 
2830 	of_node_get(to_of_node(iommu_fwnode));
2831 	fwspec->iommu_fwnode = iommu_fwnode;
2832 	fwspec->ops = ops;
2833 	dev_iommu_fwspec_set(dev, fwspec);
2834 	return 0;
2835 }
2836 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2837 
2838 void iommu_fwspec_free(struct device *dev)
2839 {
2840 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2841 
2842 	if (fwspec) {
2843 		fwnode_handle_put(fwspec->iommu_fwnode);
2844 		kfree(fwspec);
2845 		dev_iommu_fwspec_set(dev, NULL);
2846 	}
2847 }
2848 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2849 
2850 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2851 {
2852 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2853 	int i, new_num;
2854 
2855 	if (!fwspec)
2856 		return -EINVAL;
2857 
2858 	new_num = fwspec->num_ids + num_ids;
2859 	if (new_num > 1) {
2860 		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2861 				  GFP_KERNEL);
2862 		if (!fwspec)
2863 			return -ENOMEM;
2864 
2865 		dev_iommu_fwspec_set(dev, fwspec);
2866 	}
2867 
2868 	for (i = 0; i < num_ids; i++)
2869 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2870 
2871 	fwspec->num_ids = new_num;
2872 	return 0;
2873 }
2874 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
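
/*
 * Example (hedged sketch): firmware glue typically pairs these two
 * helpers; after iommu_fwspec_init() has run, an of_xlate callback such
 * as the hypothetical one below records the ID from the DT phandle:
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */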
2875 
2876 /*
2877  * Per device IOMMU features.
2878  */
2879 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2880 {
2881 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2882 
2883 	if (ops && ops->dev_enable_feat)
2884 		return ops->dev_enable_feat(dev, feat);
2885 
2886 	return -ENODEV;
2887 }
2888 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2889 
2890 /*
2891  * The device drivers should do the necessary cleanups before calling this.
2892  * For example, before disabling the aux-domain feature, the device driver
2893  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2894  */
2895 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2896 {
2897 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2898 
2899 	if (ops && ops->dev_disable_feat)
2900 		return ops->dev_disable_feat(dev, feat);
2901 
2902 	return -EBUSY;
2903 }
2904 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2905 
2906 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2907 {
2908 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2909 
2910 	if (ops && ops->dev_feat_enabled)
2911 		return ops->dev_feat_enabled(dev, feat);
2912 
2913 	return false;
2914 }
2915 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2916 
2917 /*
2918  * Aux-domain specific attach/detach.
2919  *
2920  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2921  * true. Also, as long as domains are attached to a device through this
2922  * interface, any attempt to call iommu_attach_device() should fail
2923  * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2924  * This should make us safe against a device being attached to a guest as a
2925  * whole while there are still pasid users on it (aux and sva).
2926  */
2927 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2928 {
2929 	int ret = -ENODEV;
2930 
2931 	if (domain->ops->aux_attach_dev)
2932 		ret = domain->ops->aux_attach_dev(domain, dev);
2933 
2934 	if (!ret)
2935 		trace_attach_device_to_domain(dev);
2936 
2937 	return ret;
2938 }
2939 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2940 
2941 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2942 {
2943 	if (domain->ops->aux_detach_dev) {
2944 		domain->ops->aux_detach_dev(domain, dev);
2945 		trace_detach_device_from_domain(dev);
2946 	}
2947 }
2948 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2949 
2950 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2951 {
2952 	int ret = -ENODEV;
2953 
2954 	if (domain->ops->aux_get_pasid)
2955 		ret = domain->ops->aux_get_pasid(domain, dev);
2956 
2957 	return ret;
2958 }
2959 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
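
/*
 * Example (hedged sketch): an aux-domain user such as a mediated-device
 * parent driver attaches, retrieves the PASID to program into the
 * hardware, and detaches when done:
 *
 *	ret = iommu_aux_attach_device(domain, dev);
 *	if (!ret) {
 *		pasid = iommu_aux_get_pasid(domain, dev);
 *		...program pasid into the parent device...
 *		iommu_aux_detach_device(domain, dev);
 *	}
 */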
2960 
2961 /**
2962  * iommu_sva_bind_device() - Bind a process address space to a device
2963  * @dev: the device
2964  * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the ->sva_bind() callback
2965  *
2966  * Create a bond between device and address space, allowing the device to access
2967  * the mm using the returned PASID. If a bond already exists between @dev and
2968  * @mm, it is returned and an additional reference is taken. Caller must call
2969  * iommu_sva_unbind_device() to release each reference.
2970  *
2971  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2972  * initialize the required SVA features.
2973  *
2974  * On error, returns an ERR_PTR value.
2975  */
2976 struct iommu_sva *
2977 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2978 {
2979 	struct iommu_group *group;
2980 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2981 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2982 
2983 	if (!ops || !ops->sva_bind)
2984 		return ERR_PTR(-ENODEV);
2985 
2986 	group = iommu_group_get(dev);
2987 	if (!group)
2988 		return ERR_PTR(-ENODEV);
2989 
2990 	/* Ensure device count and domain don't change while we're binding */
2991 	mutex_lock(&group->mutex);
2992 
2993 	/*
2994 	 * To keep things simple, SVA currently doesn't support IOMMU groups
2995 	 * with more than one device. Existing SVA-capable systems are not
2996 	 * affected by the problems that required IOMMU groups (lack of ACS
2997 	 * isolation, device ID aliasing and other hardware issues).
2998 	 */
2999 	if (iommu_group_device_count(group) != 1)
3000 		goto out_unlock;
3001 
3002 	handle = ops->sva_bind(dev, mm, drvdata);
3003 
3004 out_unlock:
3005 	mutex_unlock(&group->mutex);
3006 	iommu_group_put(group);
3007 
3008 	return handle;
3009 }
3010 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
3011 
3012 /**
3013  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
3014  * @handle: the handle returned by iommu_sva_bind_device()
3015  *
3016  * Put reference to a bond between device and address space. The device should
3017  * not be issuing any more transaction for this PASID. All outstanding page
3018  * requests for this PASID must have been flushed to the IOMMU.
3019  */
3020 void iommu_sva_unbind_device(struct iommu_sva *handle)
3021 {
3022 	struct iommu_group *group;
3023 	struct device *dev = handle->dev;
3024 	const struct iommu_ops *ops = dev->bus->iommu_ops;
3025 
3026 	if (!ops || !ops->sva_unbind)
3027 		return;
3028 
3029 	group = iommu_group_get(dev);
3030 	if (!group)
3031 		return;
3032 
3033 	mutex_lock(&group->mutex);
3034 	ops->sva_unbind(handle);
3035 	mutex_unlock(&group->mutex);
3036 
3037 	iommu_group_put(group);
3038 }
3039 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
3040 
3041 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
3042 {
3043 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
3044 
3045 	if (!ops || !ops->sva_get_pasid)
3046 		return IOMMU_PASID_INVALID;
3047 
3048 	return ops->sva_get_pasid(handle);
3049 }
3050 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
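
/*
 * Example (illustrative sketch, error handling elided): a driver using
 * SVA enables the feature, binds the current mm and programs the
 * returned PASID into the device:
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	pasid = iommu_sva_get_pasid(handle);
 *	if (pasid != IOMMU_PASID_INVALID)
 *		...program pasid into the device and run the workload...
 *	iommu_sva_unbind_device(handle);
 */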
3051 
3052 /*
3053  * Changes the default domain of an iommu group that has *only* one device
3054  *
3055  * @group: The group for which the default domain should be changed
3056  * @prev_dev: The device in the group (used to make sure that the device
3057  *	 hasn't changed by the time this function runs)
3058  * @type: The type of the new default domain that gets associated with the group
3059  *
3060  * Returns 0 on success and error code on failure
3061  *
3062  * Note:
3063  * 1. Presently, this function is called only when user requests to change the
3064  *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
3065  *    Please take a closer look if intended to use for other purposes.
3066  */
3067 static int iommu_change_dev_def_domain(struct iommu_group *group,
3068 				       struct device *prev_dev, int type)
3069 {
3070 	struct iommu_domain *prev_dom;
3071 	struct group_device *grp_dev;
3072 	int ret, dev_def_dom;
3073 	struct device *dev;
3074 
3075 	if (!group)
3076 		return -EINVAL;
3077 
3078 	mutex_lock(&group->mutex);
3079 
3080 	if (group->default_domain != group->domain) {
3081 		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
3082 		ret = -EBUSY;
3083 		goto out;
3084 	}
3085 
3086 	/*
3087 	 * iommu group wasn't locked while acquiring device lock in
3088 	 * iommu_group_store_type(). So, make sure that the device count hasn't
3089 	 * changed while acquiring device lock.
3090 	 *
3091 	 * Changing default domain of an iommu group with two or more devices
3092 	 * isn't supported because there could be a potential deadlock. Consider
3093 	 * the following scenario. T1 is trying to acquire device locks of all
3094 	 * the devices in the group and before it could acquire all of them,
3095 	 * there could be another thread T2 (from a different sub-system and use
3096 	 * case) that has already acquired some of the device locks and might be
3097 	 * waiting for T1 to release other device locks.
3098 	 */
3099 	if (iommu_group_device_count(group) != 1) {
3100 		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
3101 		ret = -EINVAL;
3102 		goto out;
3103 	}
3104 
3105 	/* Since group has only one device */
3106 	grp_dev = list_first_entry(&group->devices, struct group_device, list);
3107 	dev = grp_dev->dev;
3108 
3109 	if (prev_dev != dev) {
3110 		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
3111 		ret = -EBUSY;
3112 		goto out;
3113 	}
3114 
3115 	prev_dom = group->default_domain;
3116 	if (!prev_dom) {
3117 		ret = -EINVAL;
3118 		goto out;
3119 	}
3120 
3121 	dev_def_dom = iommu_get_def_domain_type(dev);
3122 	if (!type) {
3123 		/*
3124 		 * If the user hasn't requested any specific type of domain and
3125 		 * if the device supports both the domains, then default to the
3126 		 * domain the device was booted with
3127 		 */
3128 		type = dev_def_dom ? : iommu_def_domain_type;
3129 	} else if (dev_def_dom && type != dev_def_dom) {
3130 		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
3131 				    iommu_domain_type_str(type));
3132 		ret = -EINVAL;
3133 		goto out;
3134 	}
3135 
3136 	/*
3137 	 * Switch to a new domain only if the requested domain type is different
3138 	 * from the existing default domain type
3139 	 */
3140 	if (prev_dom->type == type) {
3141 		ret = 0;
3142 		goto out;
3143 	}
3144 
3145 	/* Sets group->default_domain to the newly allocated domain */
3146 	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
3147 	if (ret)
3148 		goto out;
3149 
3150 	ret = iommu_create_device_direct_mappings(group, dev);
3151 	if (ret)
3152 		goto free_new_domain;
3153 
3154 	ret = __iommu_attach_device(group->default_domain, dev);
3155 	if (ret)
3156 		goto free_new_domain;
3157 
3158 	group->domain = group->default_domain;
3159 
3160 	/*
3161 	 * Release the mutex here because ops->probe_finalize() call-back of
3162 	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
3163 	 * in-turn might call back into IOMMU core code, where it tries to take
3164 	 * group->mutex, resulting in a deadlock.
3165 	 */
3166 	mutex_unlock(&group->mutex);
3167 
3168 	/* Make sure dma_ops is appropriately set */
3169 	iommu_group_do_probe_finalize(dev, group->default_domain);
3170 	iommu_domain_free(prev_dom);
3171 	return 0;
3172 
3173 free_new_domain:
3174 	iommu_domain_free(group->default_domain);
3175 	group->default_domain = prev_dom;
3176 	group->domain = prev_dom;
3177 
3178 out:
3179 	mutex_unlock(&group->mutex);
3180 
3181 	return ret;
3182 }
3183 
3184 /*
3185  * Changing the default domain through sysfs requires the user to unbind the
3186  * drivers from the devices in the iommu group. Return failure if this
3187  * condition is not met.
3188  *
3189  * We need to consider the race between this and the device release path.
3190  * device_lock(dev) is used here to guarantee that the device release path
3191  * will not be entered at the same time.
3192  */
3193 static ssize_t iommu_group_store_type(struct iommu_group *group,
3194 				      const char *buf, size_t count)
3195 {
3196 	struct group_device *grp_dev;
3197 	struct device *dev;
3198 	int ret, req_type;
3199 
3200 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3201 		return -EACCES;
3202 
3203 	if (WARN_ON(!group))
3204 		return -EINVAL;
3205 
3206 	if (sysfs_streq(buf, "identity"))
3207 		req_type = IOMMU_DOMAIN_IDENTITY;
3208 	else if (sysfs_streq(buf, "DMA"))
3209 		req_type = IOMMU_DOMAIN_DMA;
3210 	else if (sysfs_streq(buf, "auto"))
3211 		req_type = 0;
3212 	else
3213 		return -EINVAL;
3214 
3215 	/*
3216 	 * Lock/Unlock the group mutex here before device lock to
3217 	 * 1. Make sure that the iommu group has only one device (this is a
3218 	 *    prerequisite for step 2)
3219 	 * 2. Get the struct device pointer, which is needed to take the device lock
3220 	 */
3221 	mutex_lock(&group->mutex);
3222 	if (iommu_group_device_count(group) != 1) {
3223 		mutex_unlock(&group->mutex);
3224 		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
3225 		return -EINVAL;
3226 	}
3227 
3228 	/* Since group has only one device */
3229 	grp_dev = list_first_entry(&group->devices, struct group_device, list);
3230 	dev = grp_dev->dev;
3231 	get_device(dev);
3232 
3233 	/*
3234 	 * Don't hold the group mutex because taking group mutex first and then
3235 	 * the device lock could potentially cause a deadlock as below. Assume
3236 	 * two threads T1 and T2. T1 is trying to change default domain of an
3237 	 * iommu group and T2 is trying to hot unplug a device or release [1] VF
3238 	 * of a PCIe device which is in the same iommu group. T1 takes group
3239 	 * mutex and before it could take device lock assume T2 has taken device
3240 	 * lock and is yet to take group mutex. Now, both the threads will be
3241 	 * waiting for the other thread to release its lock. The lock order
3242 	 * below avoids this deadlock.
3243 	 * device_lock(dev);
3244 	 *	mutex_lock(&group->mutex);
3245 	 *		iommu_change_dev_def_domain();
3246 	 *	mutex_unlock(&group->mutex);
3247 	 * device_unlock(dev);
3248 	 *
3249 	 * [1] Typical device release path
3250 	 * device_lock() from device/driver core code
3251 	 *  -> bus_notifier()
3252 	 *   -> iommu_bus_notifier()
3253 	 *    -> iommu_release_device()
3254 	 *     -> ops->release_device() vendor driver calls back iommu core code
3255 	 *      -> mutex_lock() from iommu core code
3256 	 */
3257 	mutex_unlock(&group->mutex);
3258 
3259 	/* Check if the device in the group still has a driver bound to it */
3260 	device_lock(dev);
3261 	if (device_is_bound(dev)) {
3262 		pr_err_ratelimited("Device is still bound to driver\n");
3263 		ret = -EBUSY;
3264 		goto out;
3265 	}
3266 
3267 	ret = iommu_change_dev_def_domain(group, dev, req_type);
3268 	ret = ret ?: count;
3269 
3270 out:
3271 	device_unlock(dev);
3272 	put_device(dev);
3273 
3274 	return ret;
3275 }
3276