xref: /openbmc/linux/drivers/iommu/iommu.c (revision 165f2d28)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  */
6 
7 #define pr_fmt(fmt)    "iommu: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bug.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/errno.h>
17 #include <linux/iommu.h>
18 #include <linux/idr.h>
19 #include <linux/notifier.h>
20 #include <linux/err.h>
21 #include <linux/pci.h>
22 #include <linux/bitops.h>
23 #include <linux/property.h>
24 #include <linux/fsl/mc.h>
25 #include <linux/module.h>
26 #include <trace/events/iommu.h>
27 
28 static struct kset *iommu_group_kset;
29 static DEFINE_IDA(iommu_group_ida);
30 
31 static unsigned int iommu_def_domain_type __read_mostly;
32 static bool iommu_dma_strict __read_mostly = true;
33 static u32 iommu_cmd_line __read_mostly;
34 
35 struct iommu_group {
36 	struct kobject kobj;
37 	struct kobject *devices_kobj;
38 	struct list_head devices;
39 	struct mutex mutex;
40 	struct blocking_notifier_head notifier;
41 	void *iommu_data;
42 	void (*iommu_data_release)(void *iommu_data);
43 	char *name;
44 	int id;
45 	struct iommu_domain *default_domain;
46 	struct iommu_domain *domain;
47 };
48 
49 struct group_device {
50 	struct list_head list;
51 	struct device *dev;
52 	char *name;
53 };
54 
55 struct iommu_group_attribute {
56 	struct attribute attr;
57 	ssize_t (*show)(struct iommu_group *group, char *buf);
58 	ssize_t (*store)(struct iommu_group *group,
59 			 const char *buf, size_t count);
60 };
61 
62 static const char * const iommu_group_resv_type_string[] = {
63 	[IOMMU_RESV_DIRECT]			= "direct",
64 	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
65 	[IOMMU_RESV_RESERVED]			= "reserved",
66 	[IOMMU_RESV_MSI]			= "msi",
67 	[IOMMU_RESV_SW_MSI]			= "msi",
68 };
69 
70 #define IOMMU_CMD_LINE_DMA_API		BIT(0)
71 
72 static void iommu_set_cmd_line_dma_api(void)
73 {
74 	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
75 }
76 
77 static bool iommu_cmd_line_dma_api(void)
78 {
79 	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
80 }
81 
82 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
83 struct iommu_group_attribute iommu_group_attr_##_name =		\
84 	__ATTR(_name, _mode, _show, _store)
85 
86 #define to_iommu_group_attr(_attr)	\
87 	container_of(_attr, struct iommu_group_attribute, attr)
88 #define to_iommu_group(_kobj)		\
89 	container_of(_kobj, struct iommu_group, kobj)
90 
91 static LIST_HEAD(iommu_device_list);
92 static DEFINE_SPINLOCK(iommu_device_lock);
93 
94 /*
95  * Use a function instead of an array here because the domain-type is a
96  * bit-field, so an array would waste memory.
97  */
98 static const char *iommu_domain_type_str(unsigned int t)
99 {
100 	switch (t) {
101 	case IOMMU_DOMAIN_BLOCKED:
102 		return "Blocked";
103 	case IOMMU_DOMAIN_IDENTITY:
104 		return "Passthrough";
105 	case IOMMU_DOMAIN_UNMANAGED:
106 		return "Unmanaged";
107 	case IOMMU_DOMAIN_DMA:
108 		return "Translated";
109 	default:
110 		return "Unknown";
111 	}
112 }
113 
114 static int __init iommu_subsys_init(void)
115 {
116 	bool cmd_line = iommu_cmd_line_dma_api();
117 
118 	if (!cmd_line) {
119 		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
120 			iommu_set_default_passthrough(false);
121 		else
122 			iommu_set_default_translated(false);
123 
124 		if (iommu_default_passthrough() && mem_encrypt_active()) {
125 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
126 			iommu_set_default_translated(false);
127 		}
128 	}
129 
130 	pr_info("Default domain type: %s %s\n",
131 		iommu_domain_type_str(iommu_def_domain_type),
132 		cmd_line ? "(set via kernel command line)" : "");
133 
134 	return 0;
135 }
136 subsys_initcall(iommu_subsys_init);
137 
138 int iommu_device_register(struct iommu_device *iommu)
139 {
140 	spin_lock(&iommu_device_lock);
141 	list_add_tail(&iommu->list, &iommu_device_list);
142 	spin_unlock(&iommu_device_lock);
143 	return 0;
144 }
145 EXPORT_SYMBOL_GPL(iommu_device_register);
146 
147 void iommu_device_unregister(struct iommu_device *iommu)
148 {
149 	spin_lock(&iommu_device_lock);
150 	list_del(&iommu->list);
151 	spin_unlock(&iommu_device_lock);
152 }
153 EXPORT_SYMBOL_GPL(iommu_device_unregister);
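
/*
 * Illustrative registration sketch (not part of the original file): a
 * hypothetical driver embedding "struct iommu_device iommu;" in its private
 * "struct my_iommu" would typically set its ops and register the instance
 * from probe, and unregister it again on remove:
 *
 *	iommu_device_set_ops(&my->iommu, &my_iommu_ops);
 *	ret = iommu_device_register(&my->iommu);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_device_unregister(&my->iommu);
 *
 * "my"/"my_iommu_ops" are placeholders; the sysfs and fwnode helpers from
 * include/linux/iommu.h are omitted here for brevity.
 */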
154 
155 static struct dev_iommu *dev_iommu_get(struct device *dev)
156 {
157 	struct dev_iommu *param = dev->iommu;
158 
159 	if (param)
160 		return param;
161 
162 	param = kzalloc(sizeof(*param), GFP_KERNEL);
163 	if (!param)
164 		return NULL;
165 
166 	mutex_init(&param->lock);
167 	dev->iommu = param;
168 	return param;
169 }
170 
171 static void dev_iommu_free(struct device *dev)
172 {
173 	iommu_fwspec_free(dev);
174 	kfree(dev->iommu);
175 	dev->iommu = NULL;
176 }
177 
178 int iommu_probe_device(struct device *dev)
179 {
180 	const struct iommu_ops *ops = dev->bus->iommu_ops;
181 	int ret;
182 
183 	WARN_ON(dev->iommu_group);
184 	if (!ops)
185 		return -EINVAL;
186 
187 	if (!dev_iommu_get(dev))
188 		return -ENOMEM;
189 
190 	if (!try_module_get(ops->owner)) {
191 		ret = -EINVAL;
192 		goto err_free_dev_param;
193 	}
194 
195 	ret = ops->add_device(dev);
196 	if (ret)
197 		goto err_module_put;
198 
199 	return 0;
200 
201 err_module_put:
202 	module_put(ops->owner);
203 err_free_dev_param:
204 	dev_iommu_free(dev);
205 	return ret;
206 }
207 
208 void iommu_release_device(struct device *dev)
209 {
210 	const struct iommu_ops *ops = dev->bus->iommu_ops;
211 
212 	if (dev->iommu_group)
213 		ops->remove_device(dev);
214 
215 	if (dev->iommu) {
216 		module_put(ops->owner);
217 		dev_iommu_free(dev);
218 	}
219 }
220 
221 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
222 						 unsigned type);
223 static int __iommu_attach_device(struct iommu_domain *domain,
224 				 struct device *dev);
225 static int __iommu_attach_group(struct iommu_domain *domain,
226 				struct iommu_group *group);
227 static void __iommu_detach_group(struct iommu_domain *domain,
228 				 struct iommu_group *group);
229 
230 static int __init iommu_set_def_domain_type(char *str)
231 {
232 	bool pt;
233 	int ret;
234 
235 	ret = kstrtobool(str, &pt);
236 	if (ret)
237 		return ret;
238 
239 	if (pt)
240 		iommu_set_default_passthrough(true);
241 	else
242 		iommu_set_default_translated(true);
243 
244 	return 0;
245 }
246 early_param("iommu.passthrough", iommu_set_def_domain_type);
247 
248 static int __init iommu_dma_setup(char *str)
249 {
250 	return kstrtobool(str, &iommu_dma_strict);
251 }
252 early_param("iommu.strict", iommu_dma_setup);
253 
254 static ssize_t iommu_group_attr_show(struct kobject *kobj,
255 				     struct attribute *__attr, char *buf)
256 {
257 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
258 	struct iommu_group *group = to_iommu_group(kobj);
259 	ssize_t ret = -EIO;
260 
261 	if (attr->show)
262 		ret = attr->show(group, buf);
263 	return ret;
264 }
265 
266 static ssize_t iommu_group_attr_store(struct kobject *kobj,
267 				      struct attribute *__attr,
268 				      const char *buf, size_t count)
269 {
270 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
271 	struct iommu_group *group = to_iommu_group(kobj);
272 	ssize_t ret = -EIO;
273 
274 	if (attr->store)
275 		ret = attr->store(group, buf, count);
276 	return ret;
277 }
278 
279 static const struct sysfs_ops iommu_group_sysfs_ops = {
280 	.show = iommu_group_attr_show,
281 	.store = iommu_group_attr_store,
282 };
283 
284 static int iommu_group_create_file(struct iommu_group *group,
285 				   struct iommu_group_attribute *attr)
286 {
287 	return sysfs_create_file(&group->kobj, &attr->attr);
288 }
289 
290 static void iommu_group_remove_file(struct iommu_group *group,
291 				    struct iommu_group_attribute *attr)
292 {
293 	sysfs_remove_file(&group->kobj, &attr->attr);
294 }
295 
296 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
297 {
298 	return sprintf(buf, "%s\n", group->name);
299 }
300 
301 /**
302  * iommu_insert_resv_region - Insert a new region in the
303  * list of reserved regions.
304  * @new: new region to insert
305  * @regions: list of regions
306  *
307  * Elements are sorted by start address and overlapping segments
308  * of the same type are merged.
309  */
310 int iommu_insert_resv_region(struct iommu_resv_region *new,
311 			     struct list_head *regions)
312 {
313 	struct iommu_resv_region *iter, *tmp, *nr, *top;
314 	LIST_HEAD(stack);
315 
316 	nr = iommu_alloc_resv_region(new->start, new->length,
317 				     new->prot, new->type);
318 	if (!nr)
319 		return -ENOMEM;
320 
321 	/* First add the new element based on start address sorting */
322 	list_for_each_entry(iter, regions, list) {
323 		if (nr->start < iter->start ||
324 		    (nr->start == iter->start && nr->type <= iter->type))
325 			break;
326 	}
327 	list_add_tail(&nr->list, &iter->list);
328 
329 	/* Merge overlapping segments of type nr->type in @regions, if any */
330 	list_for_each_entry_safe(iter, tmp, regions, list) {
331 		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
332 
333 		/* no merge needed on elements of a different type than @new */
334 		if (iter->type != new->type) {
335 			list_move_tail(&iter->list, &stack);
336 			continue;
337 		}
338 
339 		/* look for the last stack element of same type as @iter */
340 		list_for_each_entry_reverse(top, &stack, list)
341 			if (top->type == iter->type)
342 				goto check_overlap;
343 
344 		list_move_tail(&iter->list, &stack);
345 		continue;
346 
347 check_overlap:
348 		top_end = top->start + top->length - 1;
349 
350 		if (iter->start > top_end + 1) {
351 			list_move_tail(&iter->list, &stack);
352 		} else {
353 			top->length = max(top_end, iter_end) - top->start + 1;
354 			list_del(&iter->list);
355 			kfree(iter);
356 		}
357 	}
358 	list_splice(&stack, regions);
359 	return 0;
360 }
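
/*
 * Illustrative sketch (not part of the original file): inserting two
 * overlapping direct regions into an empty list; addresses and sizes are
 * made up.
 *
 *	LIST_HEAD(regions);
 *	int prot = IOMMU_READ | IOMMU_WRITE;
 *	struct iommu_resv_region *a, *b;
 *
 *	a = iommu_alloc_resv_region(0x1000, 0x2000, prot, IOMMU_RESV_DIRECT);
 *	b = iommu_alloc_resv_region(0x2000, 0x2000, prot, IOMMU_RESV_DIRECT);
 *	iommu_insert_resv_region(a, &regions);	// list: [0x1000, 0x2fff]
 *	iommu_insert_resv_region(b, &regions);	// merged: [0x1000, 0x3fff]
 *
 * Since iommu_insert_resv_region() adds a *copy* of @new, the caller still
 * owns @a and @b, and must also free the list entries when done (as the
 * sysfs "reserved_regions" handler below does).
 */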
361 
362 static int
363 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
364 				 struct list_head *group_resv_regions)
365 {
366 	struct iommu_resv_region *entry;
367 	int ret = 0;
368 
369 	list_for_each_entry(entry, dev_resv_regions, list) {
370 		ret = iommu_insert_resv_region(entry, group_resv_regions);
371 		if (ret)
372 			break;
373 	}
374 	return ret;
375 }
376 
377 int iommu_get_group_resv_regions(struct iommu_group *group,
378 				 struct list_head *head)
379 {
380 	struct group_device *device;
381 	int ret = 0;
382 
383 	mutex_lock(&group->mutex);
384 	list_for_each_entry(device, &group->devices, list) {
385 		struct list_head dev_resv_regions;
386 
387 		INIT_LIST_HEAD(&dev_resv_regions);
388 		iommu_get_resv_regions(device->dev, &dev_resv_regions);
389 		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
390 		iommu_put_resv_regions(device->dev, &dev_resv_regions);
391 		if (ret)
392 			break;
393 	}
394 	mutex_unlock(&group->mutex);
395 	return ret;
396 }
397 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
398 
399 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
400 					     char *buf)
401 {
402 	struct iommu_resv_region *region, *next;
403 	struct list_head group_resv_regions;
404 	char *str = buf;
405 
406 	INIT_LIST_HEAD(&group_resv_regions);
407 	iommu_get_group_resv_regions(group, &group_resv_regions);
408 
409 	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
410 		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
411 			       (long long int)region->start,
412 			       (long long int)(region->start +
413 						region->length - 1),
414 			       iommu_group_resv_type_string[region->type]);
415 		kfree(region);
416 	}
417 
418 	return (str - buf);
419 }
420 
421 static ssize_t iommu_group_show_type(struct iommu_group *group,
422 				     char *buf)
423 {
424 	char *type = "unknown\n";
425 
426 	if (group->default_domain) {
427 		switch (group->default_domain->type) {
428 		case IOMMU_DOMAIN_BLOCKED:
429 			type = "blocked\n";
430 			break;
431 		case IOMMU_DOMAIN_IDENTITY:
432 			type = "identity\n";
433 			break;
434 		case IOMMU_DOMAIN_UNMANAGED:
435 			type = "unmanaged\n";
436 			break;
437 		case IOMMU_DOMAIN_DMA:
438 			type = "DMA\n";
439 			break;
440 		}
441 	}
442 	strcpy(buf, type);
443 
444 	return strlen(type);
445 }
446 
447 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
448 
449 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
450 			iommu_group_show_resv_regions, NULL);
451 
452 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
453 
454 static void iommu_group_release(struct kobject *kobj)
455 {
456 	struct iommu_group *group = to_iommu_group(kobj);
457 
458 	pr_debug("Releasing group %d\n", group->id);
459 
460 	if (group->iommu_data_release)
461 		group->iommu_data_release(group->iommu_data);
462 
463 	ida_simple_remove(&iommu_group_ida, group->id);
464 
465 	if (group->default_domain)
466 		iommu_domain_free(group->default_domain);
467 
468 	kfree(group->name);
469 	kfree(group);
470 }
471 
472 static struct kobj_type iommu_group_ktype = {
473 	.sysfs_ops = &iommu_group_sysfs_ops,
474 	.release = iommu_group_release,
475 };
476 
477 /**
478  * iommu_group_alloc - Allocate a new group
479  *
480  * This function is called by an iommu driver to allocate a new iommu
481  * group.  The iommu group represents the minimum granularity of the iommu.
482  * Upon successful return, the caller holds a reference to the returned
483  * group in order to hold the group until devices are added.  Use
484  * iommu_group_put() to release this extra reference count, allowing the
485  * group to be automatically reclaimed once it has no devices or external
486  * references.
487  */
488 struct iommu_group *iommu_group_alloc(void)
489 {
490 	struct iommu_group *group;
491 	int ret;
492 
493 	group = kzalloc(sizeof(*group), GFP_KERNEL);
494 	if (!group)
495 		return ERR_PTR(-ENOMEM);
496 
497 	group->kobj.kset = iommu_group_kset;
498 	mutex_init(&group->mutex);
499 	INIT_LIST_HEAD(&group->devices);
500 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
501 
502 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
503 	if (ret < 0) {
504 		kfree(group);
505 		return ERR_PTR(ret);
506 	}
507 	group->id = ret;
508 
509 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
510 				   NULL, "%d", group->id);
511 	if (ret) {
512 		ida_simple_remove(&iommu_group_ida, group->id);
513 		kfree(group);
514 		return ERR_PTR(ret);
515 	}
516 
517 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
518 	if (!group->devices_kobj) {
519 		kobject_put(&group->kobj); /* triggers .release & free */
520 		return ERR_PTR(-ENOMEM);
521 	}
522 
523 	/*
524 	 * The devices_kobj holds a reference on the group kobject, so
525 	 * as long as that exists so will the group.  We can therefore
526 	 * use the devices_kobj for reference counting.
527 	 */
528 	kobject_put(&group->kobj);
529 
530 	ret = iommu_group_create_file(group,
531 				      &iommu_group_attr_reserved_regions);
532 	if (ret)
533 		return ERR_PTR(ret);
534 
535 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
536 	if (ret)
537 		return ERR_PTR(ret);
538 
539 	pr_debug("Allocated group %d\n", group->id);
540 
541 	return group;
542 }
543 EXPORT_SYMBOL_GPL(iommu_group_alloc);
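
/*
 * Illustrative driver-side sketch (not part of the original file): allocate
 * a group, optionally name it and attach per-group data, add a device and
 * drop the allocation reference.  "my_data"/"my_data_release" are
 * placeholders.
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-group");
 *	iommu_group_set_iommudata(group, my_data, my_data_release);
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);		// drop iommu_group_alloc()'s reference
 *
 * Most drivers go through iommu_group_get_for_dev() below instead, which
 * wraps this pattern behind the bus-specific device_group() callbacks.
 */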
544 
545 struct iommu_group *iommu_group_get_by_id(int id)
546 {
547 	struct kobject *group_kobj;
548 	struct iommu_group *group;
549 	const char *name;
550 
551 	if (!iommu_group_kset)
552 		return NULL;
553 
554 	name = kasprintf(GFP_KERNEL, "%d", id);
555 	if (!name)
556 		return NULL;
557 
558 	group_kobj = kset_find_obj(iommu_group_kset, name);
559 	kfree(name);
560 
561 	if (!group_kobj)
562 		return NULL;
563 
564 	group = container_of(group_kobj, struct iommu_group, kobj);
565 	BUG_ON(group->id != id);
566 
567 	kobject_get(group->devices_kobj);
568 	kobject_put(&group->kobj);
569 
570 	return group;
571 }
572 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
573 
574 /**
575  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
576  * @group: the group
577  *
578  * iommu drivers can store data in the group for use when doing iommu
579  * operations.  This function provides a way to retrieve it.  Caller
580  * should hold a group reference.
581  */
582 void *iommu_group_get_iommudata(struct iommu_group *group)
583 {
584 	return group->iommu_data;
585 }
586 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
587 
588 /**
589  * iommu_group_set_iommudata - set iommu_data for a group
590  * @group: the group
591  * @iommu_data: new data
592  * @release: release function for iommu_data
593  *
594  * iommu drivers can store data in the group for use when doing iommu
595  * operations.  This function provides a way to set the data after
596  * the group has been allocated.  Caller should hold a group reference.
597  */
598 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
599 			       void (*release)(void *iommu_data))
600 {
601 	group->iommu_data = iommu_data;
602 	group->iommu_data_release = release;
603 }
604 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
605 
606 /**
607  * iommu_group_set_name - set name for a group
608  * @group: the group
609  * @name: name
610  *
611  * Allow iommu driver to set a name for a group.  When set it will
612  * appear in a name attribute file under the group in sysfs.
613  */
614 int iommu_group_set_name(struct iommu_group *group, const char *name)
615 {
616 	int ret;
617 
618 	if (group->name) {
619 		iommu_group_remove_file(group, &iommu_group_attr_name);
620 		kfree(group->name);
621 		group->name = NULL;
622 		if (!name)
623 			return 0;
624 	}
625 
626 	group->name = kstrdup(name, GFP_KERNEL);
627 	if (!group->name)
628 		return -ENOMEM;
629 
630 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
631 	if (ret) {
632 		kfree(group->name);
633 		group->name = NULL;
634 		return ret;
635 	}
636 
637 	return 0;
638 }
639 EXPORT_SYMBOL_GPL(iommu_group_set_name);
640 
641 static int iommu_group_create_direct_mappings(struct iommu_group *group,
642 					      struct device *dev)
643 {
644 	struct iommu_domain *domain = group->default_domain;
645 	struct iommu_resv_region *entry;
646 	struct list_head mappings;
647 	unsigned long pg_size;
648 	int ret = 0;
649 
650 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
651 		return 0;
652 
653 	BUG_ON(!domain->pgsize_bitmap);
654 
655 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
656 	INIT_LIST_HEAD(&mappings);
657 
658 	iommu_get_resv_regions(dev, &mappings);
659 
660 	/* We need to consider overlapping regions for different devices */
661 	list_for_each_entry(entry, &mappings, list) {
662 		dma_addr_t start, end, addr;
663 
664 		if (domain->ops->apply_resv_region)
665 			domain->ops->apply_resv_region(dev, domain, entry);
666 
667 		start = ALIGN(entry->start, pg_size);
668 		end   = ALIGN(entry->start + entry->length, pg_size);
669 
670 		if (entry->type != IOMMU_RESV_DIRECT &&
671 		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
672 			continue;
673 
674 		for (addr = start; addr < end; addr += pg_size) {
675 			phys_addr_t phys_addr;
676 
677 			phys_addr = iommu_iova_to_phys(domain, addr);
678 			if (phys_addr)
679 				continue;
680 
681 			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
682 			if (ret)
683 				goto out;
684 		}
685 
686 	}
687 
688 	iommu_flush_tlb_all(domain);
689 
690 out:
691 	iommu_put_resv_regions(dev, &mappings);
692 
693 	return ret;
694 }
695 
696 /**
697  * iommu_group_add_device - add a device to an iommu group
698  * @group: the group into which to add the device (reference should be held)
699  * @dev: the device
700  *
701  * This function is called by an iommu driver to add a device into a
702  * group.  Adding a device increments the group reference count.
703  */
704 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
705 {
706 	int ret, i = 0;
707 	struct group_device *device;
708 
709 	device = kzalloc(sizeof(*device), GFP_KERNEL);
710 	if (!device)
711 		return -ENOMEM;
712 
713 	device->dev = dev;
714 
715 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
716 	if (ret)
717 		goto err_free_device;
718 
719 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
720 rename:
721 	if (!device->name) {
722 		ret = -ENOMEM;
723 		goto err_remove_link;
724 	}
725 
726 	ret = sysfs_create_link_nowarn(group->devices_kobj,
727 				       &dev->kobj, device->name);
728 	if (ret) {
729 		if (ret == -EEXIST && i >= 0) {
730 			/*
731 			 * Account for the slim chance of collision
732 			 * and append an instance to the name.
733 			 */
734 			kfree(device->name);
735 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
736 						 kobject_name(&dev->kobj), i++);
737 			goto rename;
738 		}
739 		goto err_free_name;
740 	}
741 
742 	kobject_get(group->devices_kobj);
743 
744 	dev->iommu_group = group;
745 
746 	iommu_group_create_direct_mappings(group, dev);
747 
748 	mutex_lock(&group->mutex);
749 	list_add_tail(&device->list, &group->devices);
750 	if (group->domain)
751 		ret = __iommu_attach_device(group->domain, dev);
752 	mutex_unlock(&group->mutex);
753 	if (ret)
754 		goto err_put_group;
755 
756 	/* Notify any listeners about change to group. */
757 	blocking_notifier_call_chain(&group->notifier,
758 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
759 
760 	trace_add_device_to_group(group->id, dev);
761 
762 	dev_info(dev, "Adding to iommu group %d\n", group->id);
763 
764 	return 0;
765 
766 err_put_group:
767 	mutex_lock(&group->mutex);
768 	list_del(&device->list);
769 	mutex_unlock(&group->mutex);
770 	dev->iommu_group = NULL;
771 	kobject_put(group->devices_kobj);
772 	sysfs_remove_link(group->devices_kobj, device->name);
773 err_free_name:
774 	kfree(device->name);
775 err_remove_link:
776 	sysfs_remove_link(&dev->kobj, "iommu_group");
777 err_free_device:
778 	kfree(device);
779 	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
780 	return ret;
781 }
782 EXPORT_SYMBOL_GPL(iommu_group_add_device);
783 
784 /**
785  * iommu_group_remove_device - remove a device from its current group
786  * @dev: device to be removed
787  *
788  * This function is called by an iommu driver to remove the device from
789  * its current group.  This decrements the iommu group reference count.
790  */
791 void iommu_group_remove_device(struct device *dev)
792 {
793 	struct iommu_group *group = dev->iommu_group;
794 	struct group_device *tmp_device, *device = NULL;
795 
796 	dev_info(dev, "Removing from iommu group %d\n", group->id);
797 
798 	/* Pre-notify listeners that a device is being removed. */
799 	blocking_notifier_call_chain(&group->notifier,
800 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
801 
802 	mutex_lock(&group->mutex);
803 	list_for_each_entry(tmp_device, &group->devices, list) {
804 		if (tmp_device->dev == dev) {
805 			device = tmp_device;
806 			list_del(&device->list);
807 			break;
808 		}
809 	}
810 	mutex_unlock(&group->mutex);
811 
812 	if (!device)
813 		return;
814 
815 	sysfs_remove_link(group->devices_kobj, device->name);
816 	sysfs_remove_link(&dev->kobj, "iommu_group");
817 
818 	trace_remove_device_from_group(group->id, dev);
819 
820 	kfree(device->name);
821 	kfree(device);
822 	dev->iommu_group = NULL;
823 	kobject_put(group->devices_kobj);
824 }
825 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
826 
827 static int iommu_group_device_count(struct iommu_group *group)
828 {
829 	struct group_device *entry;
830 	int ret = 0;
831 
832 	list_for_each_entry(entry, &group->devices, list)
833 		ret++;
834 
835 	return ret;
836 }
837 
838 /**
839  * iommu_group_for_each_dev - iterate over each device in the group
840  * @group: the group
841  * @data: caller opaque data to be passed to callback function
842  * @fn: caller supplied callback function
843  *
844  * This function is called by group users to iterate over group devices.
845  * Callers should hold a reference count to the group during callback.
846  * The group->mutex is held across callbacks, which will block calls to
847  * iommu_group_add/remove_device.
848  */
849 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
850 				      int (*fn)(struct device *, void *))
851 {
852 	struct group_device *device;
853 	int ret = 0;
854 
855 	list_for_each_entry(device, &group->devices, list) {
856 		ret = fn(device->dev, data);
857 		if (ret)
858 			break;
859 	}
860 	return ret;
861 }
862 
863 
864 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
865 			     int (*fn)(struct device *, void *))
866 {
867 	int ret;
868 
869 	mutex_lock(&group->mutex);
870 	ret = __iommu_group_for_each_dev(group, data, fn);
871 	mutex_unlock(&group->mutex);
872 
873 	return ret;
874 }
875 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
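
/*
 * Illustrative sketch (not part of the original file): a group user counting
 * the devices in a group through the iterator.
 *
 *	static int my_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// returning non-zero stops the iteration
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, my_count_dev);
 */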
876 
877 /**
878  * iommu_group_get - Return the group for a device and increment reference
879  * @dev: get the group that this device belongs to
880  *
881  * This function is called by iommu drivers and users to get the group
882  * for the specified device.  If found, the group is returned and the group
883  * reference is incremented; otherwise NULL is returned.
884  */
885 struct iommu_group *iommu_group_get(struct device *dev)
886 {
887 	struct iommu_group *group = dev->iommu_group;
888 
889 	if (group)
890 		kobject_get(group->devices_kobj);
891 
892 	return group;
893 }
894 EXPORT_SYMBOL_GPL(iommu_group_get);
895 
896 /**
897  * iommu_group_ref_get - Increment reference on a group
898  * @group: the group to use, must not be NULL
899  *
900  * This function is called by iommu drivers to take additional references on an
901  * existing group.  Returns the given group for convenience.
902  */
903 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
904 {
905 	kobject_get(group->devices_kobj);
906 	return group;
907 }
908 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
909 
910 /**
911  * iommu_group_put - Decrement group reference
912  * @group: the group to use
913  *
914  * This function is called by iommu drivers and users to release the
915  * iommu group.  Once the reference count is zero, the group is released.
916  */
917 void iommu_group_put(struct iommu_group *group)
918 {
919 	if (group)
920 		kobject_put(group->devices_kobj);
921 }
922 EXPORT_SYMBOL_GPL(iommu_group_put);
923 
924 /**
925  * iommu_group_register_notifier - Register a notifier for group changes
926  * @group: the group to watch
927  * @nb: notifier block to signal
928  *
929  * This function allows iommu group users to track changes in a group.
930  * See include/linux/iommu.h for actions sent via this notifier.  Caller
931  * should hold a reference to the group throughout notifier registration.
932  */
933 int iommu_group_register_notifier(struct iommu_group *group,
934 				  struct notifier_block *nb)
935 {
936 	return blocking_notifier_chain_register(&group->notifier, nb);
937 }
938 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
939 
940 /**
941  * iommu_group_unregister_notifier - Unregister a notifier
942  * @group: the group to watch
943  * @nb: notifier block to signal
944  *
945  * Unregister a previously registered group notifier block.
946  */
947 int iommu_group_unregister_notifier(struct iommu_group *group,
948 				    struct notifier_block *nb)
949 {
950 	return blocking_notifier_chain_unregister(&group->notifier, nb);
951 }
952 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
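
/*
 * Illustrative sketch (not part of the original file): a group user watching
 * device add/del events on a group it holds a reference to.  Names prefixed
 * "my_" are placeholders.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to watched group\n");
 *		else if (action == IOMMU_GROUP_NOTIFY_DEL_DEVICE)
 *			dev_info(dev, "leaving watched group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */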
953 
954 /**
955  * iommu_register_device_fault_handler() - Register a device fault handler
956  * @dev: the device
957  * @handler: the fault handler
958  * @data: private data passed as argument to the handler
959  *
960  * When an IOMMU fault event is received, this handler gets called with the
961  * fault event and data as argument. The handler should return 0 on success. If
962  * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
963  * complete the fault by calling iommu_page_response() with one of the following
964  * response codes:
965  * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
966  * - IOMMU_PAGE_RESP_INVALID: terminate the fault
967  * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
968  *   page faults if possible.
969  *
970  * Return 0 if the fault handler was installed successfully, or an error.
971  */
972 int iommu_register_device_fault_handler(struct device *dev,
973 					iommu_dev_fault_handler_t handler,
974 					void *data)
975 {
976 	struct dev_iommu *param = dev->iommu;
977 	int ret = 0;
978 
979 	if (!param)
980 		return -EINVAL;
981 
982 	mutex_lock(&param->lock);
983 	/* Only allow one fault handler registered for each device */
984 	if (param->fault_param) {
985 		ret = -EBUSY;
986 		goto done_unlock;
987 	}
988 
989 	get_device(dev);
990 	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
991 	if (!param->fault_param) {
992 		put_device(dev);
993 		ret = -ENOMEM;
994 		goto done_unlock;
995 	}
996 	param->fault_param->handler = handler;
997 	param->fault_param->data = data;
998 	mutex_init(&param->fault_param->lock);
999 	INIT_LIST_HEAD(&param->fault_param->faults);
1000 
1001 done_unlock:
1002 	mutex_unlock(&param->lock);
1003 
1004 	return ret;
1005 }
1006 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
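
/*
 * Illustrative sketch (not part of the original file): a consumer that
 * receives recoverable page requests and completes them via
 * iommu_page_response().  Real code would service the request instead of
 * unconditionally answering IOMMU_PAGE_RESP_INVALID.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp;
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *
 *		resp = (struct iommu_page_response) {
 *			.version = IOMMU_PAGE_RESP_VERSION_1,
 *			.pasid	 = fault->prm.pasid,
 *			.grpid	 = fault->prm.grpid,
 *			.flags	 = (fault->prm.flags &
 *				    IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) ?
 *				   IOMMU_PAGE_RESP_PASID_VALID : 0,
 *			.code	 = IOMMU_PAGE_RESP_INVALID,
 *		};
 *		return iommu_page_response(dev, &resp);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */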
1007 
1008 /**
1009  * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1010  * @dev: the device
1011  *
1012  * Remove the device fault handler installed with
1013  * iommu_register_device_fault_handler().
1014  *
1015  * Return 0 on success, or an error.
1016  */
1017 int iommu_unregister_device_fault_handler(struct device *dev)
1018 {
1019 	struct dev_iommu *param = dev->iommu;
1020 	int ret = 0;
1021 
1022 	if (!param)
1023 		return -EINVAL;
1024 
1025 	mutex_lock(&param->lock);
1026 
1027 	if (!param->fault_param)
1028 		goto unlock;
1029 
1030 	/* we cannot unregister handler if there are pending faults */
1031 	if (!list_empty(&param->fault_param->faults)) {
1032 		ret = -EBUSY;
1033 		goto unlock;
1034 	}
1035 
1036 	kfree(param->fault_param);
1037 	param->fault_param = NULL;
1038 	put_device(dev);
1039 unlock:
1040 	mutex_unlock(&param->lock);
1041 
1042 	return ret;
1043 }
1044 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1045 
1046 /**
1047  * iommu_report_device_fault() - Report fault event to device driver
1048  * @dev: the device
1049  * @evt: fault event data
1050  *
1051  * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1052  * handler. When this function fails and the fault is recoverable, it is the
1053  * caller's responsibility to complete the fault.
1054  *
1055  * Return 0 on success, or an error.
1056  */
1057 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1058 {
1059 	struct dev_iommu *param = dev->iommu;
1060 	struct iommu_fault_event *evt_pending = NULL;
1061 	struct iommu_fault_param *fparam;
1062 	int ret = 0;
1063 
1064 	if (!param || !evt)
1065 		return -EINVAL;
1066 
1067 	/* we only report device fault if there is a handler registered */
1068 	mutex_lock(&param->lock);
1069 	fparam = param->fault_param;
1070 	if (!fparam || !fparam->handler) {
1071 		ret = -EINVAL;
1072 		goto done_unlock;
1073 	}
1074 
1075 	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1076 	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1077 		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1078 				      GFP_KERNEL);
1079 		if (!evt_pending) {
1080 			ret = -ENOMEM;
1081 			goto done_unlock;
1082 		}
1083 		mutex_lock(&fparam->lock);
1084 		list_add_tail(&evt_pending->list, &fparam->faults);
1085 		mutex_unlock(&fparam->lock);
1086 	}
1087 
1088 	ret = fparam->handler(&evt->fault, fparam->data);
1089 	if (ret && evt_pending) {
1090 		mutex_lock(&fparam->lock);
1091 		list_del(&evt_pending->list);
1092 		mutex_unlock(&fparam->lock);
1093 		kfree(evt_pending);
1094 	}
1095 done_unlock:
1096 	mutex_unlock(&param->lock);
1097 	return ret;
1098 }
1099 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1100 
1101 int iommu_page_response(struct device *dev,
1102 			struct iommu_page_response *msg)
1103 {
1104 	bool pasid_valid;
1105 	int ret = -EINVAL;
1106 	struct iommu_fault_event *evt;
1107 	struct iommu_fault_page_request *prm;
1108 	struct dev_iommu *param = dev->iommu;
1109 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1110 
1111 	if (!domain || !domain->ops->page_response)
1112 		return -ENODEV;
1113 
1114 	if (!param || !param->fault_param)
1115 		return -EINVAL;
1116 
1117 	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1118 	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1119 		return -EINVAL;
1120 
1121 	/* Only send response if there is a fault report pending */
1122 	mutex_lock(&param->fault_param->lock);
1123 	if (list_empty(&param->fault_param->faults)) {
1124 		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1125 		goto done_unlock;
1126 	}
1127 	/*
1128 	 * Check if we have a matching page request pending to respond,
1129 	 * otherwise return -EINVAL
1130 	 */
1131 	list_for_each_entry(evt, &param->fault_param->faults, list) {
1132 		prm = &evt->fault.prm;
1133 		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1134 
1135 		if ((pasid_valid && prm->pasid != msg->pasid) ||
1136 		    prm->grpid != msg->grpid)
1137 			continue;
1138 
1139 		/* Sanitize the reply */
1140 		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1141 
1142 		ret = domain->ops->page_response(dev, evt, msg);
1143 		list_del(&evt->list);
1144 		kfree(evt);
1145 		break;
1146 	}
1147 
1148 done_unlock:
1149 	mutex_unlock(&param->fault_param->lock);
1150 	return ret;
1151 }
1152 EXPORT_SYMBOL_GPL(iommu_page_response);
1153 
1154 /**
1155  * iommu_group_id - Return ID for a group
1156  * @group: the group to ID
1157  *
1158  * Return the unique ID for the group matching the sysfs group number.
1159  */
1160 int iommu_group_id(struct iommu_group *group)
1161 {
1162 	return group->id;
1163 }
1164 EXPORT_SYMBOL_GPL(iommu_group_id);
1165 
1166 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1167 					       unsigned long *devfns);
1168 
1169 /*
1170  * To consider a PCI device isolated, we require ACS to support Source
1171  * Validation, Request Redirection, Completer Redirection, and Upstream
1172  * Forwarding.  This effectively means that devices cannot spoof their
1173  * requester ID, requests and completions cannot be redirected, and all
1174  * transactions are forwarded upstream, even as they pass through a
1175  * bridge where the target device is downstream.
1176  */
1177 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1178 
1179 /*
1180  * For multifunction devices which are not isolated from each other, find
1181  * all the other non-isolated functions and look for existing groups.  For
1182  * each function, we also need to look for aliases to or from other devices
1183  * that may already have a group.
1184  */
1185 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1186 							unsigned long *devfns)
1187 {
1188 	struct pci_dev *tmp = NULL;
1189 	struct iommu_group *group;
1190 
1191 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1192 		return NULL;
1193 
1194 	for_each_pci_dev(tmp) {
1195 		if (tmp == pdev || tmp->bus != pdev->bus ||
1196 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1197 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1198 			continue;
1199 
1200 		group = get_pci_alias_group(tmp, devfns);
1201 		if (group) {
1202 			pci_dev_put(tmp);
1203 			return group;
1204 		}
1205 	}
1206 
1207 	return NULL;
1208 }
1209 
1210 /*
1211  * Look for aliases to or from the given device for existing groups. DMA
1212  * aliases are only supported on the same bus, therefore the search
1213  * space is quite small (especially since we're really only looking at PCIe
1214  * devices, and therefore only expect multiple slots on the root complex or
1215  * downstream switch ports).  It's conceivable though that a pair of
1216  * multifunction devices could have aliases between them that would cause a
1217  * loop.  To prevent this, we use a bitmap to track where we've been.
1218  */
1219 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1220 					       unsigned long *devfns)
1221 {
1222 	struct pci_dev *tmp = NULL;
1223 	struct iommu_group *group;
1224 
1225 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1226 		return NULL;
1227 
1228 	group = iommu_group_get(&pdev->dev);
1229 	if (group)
1230 		return group;
1231 
1232 	for_each_pci_dev(tmp) {
1233 		if (tmp == pdev || tmp->bus != pdev->bus)
1234 			continue;
1235 
1236 		/* We alias them or they alias us */
1237 		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1238 			group = get_pci_alias_group(tmp, devfns);
1239 			if (group) {
1240 				pci_dev_put(tmp);
1241 				return group;
1242 			}
1243 
1244 			group = get_pci_function_alias_group(tmp, devfns);
1245 			if (group) {
1246 				pci_dev_put(tmp);
1247 				return group;
1248 			}
1249 		}
1250 	}
1251 
1252 	return NULL;
1253 }
1254 
1255 struct group_for_pci_data {
1256 	struct pci_dev *pdev;
1257 	struct iommu_group *group;
1258 };
1259 
1260 /*
1261  * DMA alias iterator callback, return the last seen device.  Stop and return
1262  * the IOMMU group if we find one along the way.
1263  */
1264 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1265 {
1266 	struct group_for_pci_data *data = opaque;
1267 
1268 	data->pdev = pdev;
1269 	data->group = iommu_group_get(&pdev->dev);
1270 
1271 	return data->group != NULL;
1272 }
1273 
1274 /*
1275  * Generic device_group call-back function. It just allocates one
1276  * iommu-group per device.
1277  */
1278 struct iommu_group *generic_device_group(struct device *dev)
1279 {
1280 	return iommu_group_alloc();
1281 }
1282 EXPORT_SYMBOL_GPL(generic_device_group);
1283 
1284 /*
1285  * Use standard PCI bus topology, isolation features, and DMA alias quirks
1286  * to find or create an IOMMU group for a device.
1287  */
1288 struct iommu_group *pci_device_group(struct device *dev)
1289 {
1290 	struct pci_dev *pdev = to_pci_dev(dev);
1291 	struct group_for_pci_data data;
1292 	struct pci_bus *bus;
1293 	struct iommu_group *group = NULL;
1294 	u64 devfns[4] = { 0 };
1295 
1296 	if (WARN_ON(!dev_is_pci(dev)))
1297 		return ERR_PTR(-EINVAL);
1298 
1299 	/*
1300 	 * Find the upstream DMA alias for the device.  A device must not
1301 	 * be aliased due to topology in order to have its own IOMMU group.
1302 	 * If we find an alias along the way that already belongs to a
1303 	 * group, use it.
1304 	 */
1305 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1306 		return data.group;
1307 
1308 	pdev = data.pdev;
1309 
1310 	/*
1311 	 * Continue upstream from the point of minimum IOMMU granularity
1312 	 * due to aliases to the point where devices are protected from
1313 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1314 	 * group, use it.
1315 	 */
1316 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1317 		if (!bus->self)
1318 			continue;
1319 
1320 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1321 			break;
1322 
1323 		pdev = bus->self;
1324 
1325 		group = iommu_group_get(&pdev->dev);
1326 		if (group)
1327 			return group;
1328 	}
1329 
1330 	/*
1331 	 * Look for existing groups on device aliases.  If we alias another
1332 	 * device or another device aliases us, use the same group.
1333 	 */
1334 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1335 	if (group)
1336 		return group;
1337 
1338 	/*
1339 	 * Look for existing groups on non-isolated functions on the same
1340  * slot and aliases of those functions, if any.  No need to clear
1341 	 * the search bitmap, the tested devfns are still valid.
1342 	 */
1343 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1344 	if (group)
1345 		return group;
1346 
1347 	/* No shared group found, allocate new */
1348 	return iommu_group_alloc();
1349 }
1350 EXPORT_SYMBOL_GPL(pci_device_group);
1351 
1352 /* Get the IOMMU group for device on fsl-mc bus */
1353 struct iommu_group *fsl_mc_device_group(struct device *dev)
1354 {
1355 	struct device *cont_dev = fsl_mc_cont_dev(dev);
1356 	struct iommu_group *group;
1357 
1358 	group = iommu_group_get(cont_dev);
1359 	if (!group)
1360 		group = iommu_group_alloc();
1361 	return group;
1362 }
1363 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1364 
1365 /**
1366  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1367  * @dev: target device
1368  *
1369  * This function is intended to be called by IOMMU drivers and extended to
1370  * support common, bus-defined algorithms when determining or creating the
1371  * IOMMU group for a device.  On success, the caller will hold a reference
1372  * to the returned IOMMU group, which will already include the provided
1373  * device.  The reference should be released with iommu_group_put().
1374  */
1375 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1376 {
1377 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1378 	struct iommu_group *group;
1379 	int ret;
1380 
1381 	group = iommu_group_get(dev);
1382 	if (group)
1383 		return group;
1384 
1385 	if (!ops)
1386 		return ERR_PTR(-EINVAL);
1387 
1388 	group = ops->device_group(dev);
1389 	if (WARN_ON_ONCE(group == NULL))
1390 		return ERR_PTR(-EINVAL);
1391 
1392 	if (IS_ERR(group))
1393 		return group;
1394 
1395 	/*
1396 	 * Try to allocate a default domain - needs support from the
1397 	 * IOMMU driver.
1398 	 */
1399 	if (!group->default_domain) {
1400 		struct iommu_domain *dom;
1401 
1402 		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1403 		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1404 			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1405 			if (dom) {
1406 				dev_warn(dev,
1407 					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1408 					 iommu_def_domain_type);
1409 			}
1410 		}
1411 
1412 		group->default_domain = dom;
1413 		if (!group->domain)
1414 			group->domain = dom;
1415 
1416 		if (dom && !iommu_dma_strict) {
1417 			int attr = 1;
1418 			iommu_domain_set_attr(dom,
1419 					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1420 					      &attr);
1421 		}
1422 	}
1423 
1424 	ret = iommu_group_add_device(group, dev);
1425 	if (ret) {
1426 		iommu_group_put(group);
1427 		return ERR_PTR(ret);
1428 	}
1429 
1430 	return group;
1431 }
1432 EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
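
/*
 * Illustrative sketch (not part of the original file): the usual pattern in
 * an IOMMU driver's add_device() callback, which lets the bus-specific
 * device_group() logic find or create the right group:
 *
 *	static int my_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get_for_dev(dev);
 *
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);	// the device keeps the group alive now
 *		return 0;
 *	}
 */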
1433 
1434 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1435 {
1436 	return group->default_domain;
1437 }
1438 
1439 static int add_iommu_group(struct device *dev, void *data)
1440 {
1441 	int ret = iommu_probe_device(dev);
1442 
1443 	/*
1444 	 * We ignore -ENODEV errors for now, as they just mean that the
1445 	 * device is not translated by an IOMMU. We still care about
1446 	 * other errors and fail to initialize when they happen.
1447 	 */
1448 	if (ret == -ENODEV)
1449 		ret = 0;
1450 
1451 	return ret;
1452 }
1453 
1454 static int remove_iommu_group(struct device *dev, void *data)
1455 {
1456 	iommu_release_device(dev);
1457 
1458 	return 0;
1459 }
1460 
1461 static int iommu_bus_notifier(struct notifier_block *nb,
1462 			      unsigned long action, void *data)
1463 {
1464 	unsigned long group_action = 0;
1465 	struct device *dev = data;
1466 	struct iommu_group *group;
1467 
1468 	/*
1469 	 * ADD/DEL call into iommu driver ops if provided, which may
1470 	 * result in ADD/DEL notifiers to group->notifier
1471 	 */
1472 	if (action == BUS_NOTIFY_ADD_DEVICE) {
1473 		int ret;
1474 
1475 		ret = iommu_probe_device(dev);
1476 		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1477 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1478 		iommu_release_device(dev);
1479 		return NOTIFY_OK;
1480 	}
1481 
1482 	/*
1483 	 * Remaining BUS_NOTIFYs get filtered and republished to the
1484 	 * group, if anyone is listening
1485 	 */
1486 	group = iommu_group_get(dev);
1487 	if (!group)
1488 		return 0;
1489 
1490 	switch (action) {
1491 	case BUS_NOTIFY_BIND_DRIVER:
1492 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1493 		break;
1494 	case BUS_NOTIFY_BOUND_DRIVER:
1495 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1496 		break;
1497 	case BUS_NOTIFY_UNBIND_DRIVER:
1498 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1499 		break;
1500 	case BUS_NOTIFY_UNBOUND_DRIVER:
1501 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1502 		break;
1503 	}
1504 
1505 	if (group_action)
1506 		blocking_notifier_call_chain(&group->notifier,
1507 					     group_action, dev);
1508 
1509 	iommu_group_put(group);
1510 	return 0;
1511 }
1512 
1513 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1514 {
1515 	int err;
1516 	struct notifier_block *nb;
1517 
1518 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1519 	if (!nb)
1520 		return -ENOMEM;
1521 
1522 	nb->notifier_call = iommu_bus_notifier;
1523 
1524 	err = bus_register_notifier(bus, nb);
1525 	if (err)
1526 		goto out_free;
1527 
1528 	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1529 	if (err)
1530 		goto out_err;
1531 
1532 
1533 	return 0;
1534 
1535 out_err:
1536 	/* Clean up */
1537 	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1538 	bus_unregister_notifier(bus, nb);
1539 
1540 out_free:
1541 	kfree(nb);
1542 
1543 	return err;
1544 }
1545 
1546 /**
1547  * bus_set_iommu - set iommu-callbacks for the bus
1548  * @bus: bus.
1549  * @ops: the callbacks provided by the iommu-driver
1550  *
1551  * This function is called by an iommu driver to set the iommu methods
1552  * used for a particular bus. Drivers for devices on that bus can use
1553  * the iommu-api after these ops are registered.
1554  * This special function is needed because IOMMUs are usually devices on
1555  * the bus itself, so the iommu drivers are not initialized when the bus
1556  * is set up. With this function the iommu-driver can set the iommu-ops
1557  * afterwards.
1558  */
1559 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1560 {
1561 	int err;
1562 
1563 	if (ops == NULL) {
1564 		bus->iommu_ops = NULL;
1565 		return 0;
1566 	}
1567 
1568 	if (bus->iommu_ops != NULL)
1569 		return -EBUSY;
1570 
1571 	bus->iommu_ops = ops;
1572 
1573 	/* Do IOMMU specific setup for this bus-type */
1574 	err = iommu_bus_init(bus, ops);
1575 	if (err)
1576 		bus->iommu_ops = NULL;
1577 
1578 	return err;
1579 }
1580 EXPORT_SYMBOL_GPL(bus_set_iommu);
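
/*
 * Illustrative sketch (not part of the original file): an IOMMU driver
 * typically registers its ops for each bus type it can translate, e.g.
 *
 *	bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 * #ifdef CONFIG_PCI
 *	if (!iommu_present(&pci_bus_type))
 *		bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 * #endif
 *
 * "my_iommu_ops" is a placeholder for the driver's struct iommu_ops.
 */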
1581 
1582 bool iommu_present(struct bus_type *bus)
1583 {
1584 	return bus->iommu_ops != NULL;
1585 }
1586 EXPORT_SYMBOL_GPL(iommu_present);
1587 
1588 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1589 {
1590 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1591 		return false;
1592 
1593 	return bus->iommu_ops->capable(cap);
1594 }
1595 EXPORT_SYMBOL_GPL(iommu_capable);
1596 
1597 /**
1598  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1599  * @domain: iommu domain
1600  * @handler: fault handler
1601  * @token: user data, will be passed back to the fault handler
1602  *
1603  * This function should be used by IOMMU users which want to be notified
1604  * whenever an IOMMU fault happens.
1605  *
1606  * The fault handler itself should return 0 on success, and an appropriate
1607  * error code otherwise.
1608  */
1609 void iommu_set_fault_handler(struct iommu_domain *domain,
1610 					iommu_fault_handler_t handler,
1611 					void *token)
1612 {
1613 	BUG_ON(!domain);
1614 
1615 	domain->handler = handler;
1616 	domain->handler_token = token;
1617 }
1618 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
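
/*
 * Illustrative sketch (not part of the original file): an unmanaged-domain
 * user logging faults that the IOMMU driver reports via report_iommu_fault().
 *
 *	static int my_domain_fault(struct iommu_domain *domain,
 *				   struct device *dev, unsigned long iova,
 *				   int flags, void *token)
 *	{
 *		dev_err(dev, "iommu %s fault at iova 0x%lx\n",
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *		return -ENOSYS;	// fault not handled here
 *	}
 *
 *	iommu_set_fault_handler(domain, my_domain_fault, NULL);
 */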
1619 
1620 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1621 						 unsigned type)
1622 {
1623 	struct iommu_domain *domain;
1624 
1625 	if (bus == NULL || bus->iommu_ops == NULL)
1626 		return NULL;
1627 
1628 	domain = bus->iommu_ops->domain_alloc(type);
1629 	if (!domain)
1630 		return NULL;
1631 
1632 	domain->ops  = bus->iommu_ops;
1633 	domain->type = type;
1634 	/* Assume all sizes by default; the driver may override this later */
1635 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1636 
1637 	return domain;
1638 }
1639 
1640 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1641 {
1642 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1643 }
1644 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1645 
1646 void iommu_domain_free(struct iommu_domain *domain)
1647 {
1648 	domain->ops->domain_free(domain);
1649 }
1650 EXPORT_SYMBOL_GPL(iommu_domain_free);
1651 
1652 static int __iommu_attach_device(struct iommu_domain *domain,
1653 				 struct device *dev)
1654 {
1655 	int ret;
1656 	if ((domain->ops->is_attach_deferred != NULL) &&
1657 	    domain->ops->is_attach_deferred(domain, dev))
1658 		return 0;
1659 
1660 	if (unlikely(domain->ops->attach_dev == NULL))
1661 		return -ENODEV;
1662 
1663 	ret = domain->ops->attach_dev(domain, dev);
1664 	if (!ret)
1665 		trace_attach_device_to_domain(dev);
1666 	return ret;
1667 }
1668 
1669 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1670 {
1671 	struct iommu_group *group;
1672 	int ret;
1673 
1674 	group = iommu_group_get(dev);
1675 	if (!group)
1676 		return -ENODEV;
1677 
1678 	/*
1679 	 * Lock the group to make sure the device-count doesn't
1680 	 * change while we are attaching
1681 	 */
1682 	mutex_lock(&group->mutex);
1683 	ret = -EINVAL;
1684 	if (iommu_group_device_count(group) != 1)
1685 		goto out_unlock;
1686 
1687 	ret = __iommu_attach_group(domain, group);
1688 
1689 out_unlock:
1690 	mutex_unlock(&group->mutex);
1691 	iommu_group_put(group);
1692 
1693 	return ret;
1694 }
1695 EXPORT_SYMBOL_GPL(iommu_attach_device);
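
/*
 * Illustrative sketch (not part of the original file): the typical lifecycle
 * of an unmanaged domain around iommu_attach_device(); "phys" and the IOVA
 * are made up.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto out_free;
 *	ret = iommu_map(domain, 0x100000, phys, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x100000, SZ_4K);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */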
1696 
1697 int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
1698 			   struct iommu_cache_invalidate_info *inv_info)
1699 {
1700 	if (unlikely(!domain->ops->cache_invalidate))
1701 		return -ENODEV;
1702 
1703 	return domain->ops->cache_invalidate(domain, dev, inv_info);
1704 }
1705 EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
1706 
1707 int iommu_sva_bind_gpasid(struct iommu_domain *domain,
1708 			   struct device *dev, struct iommu_gpasid_bind_data *data)
1709 {
1710 	if (unlikely(!domain->ops->sva_bind_gpasid))
1711 		return -ENODEV;
1712 
1713 	return domain->ops->sva_bind_gpasid(domain, dev, data);
1714 }
1715 EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
1716 
1717 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
1718 			     ioasid_t pasid)
1719 {
1720 	if (unlikely(!domain->ops->sva_unbind_gpasid))
1721 		return -ENODEV;
1722 
1723 	return domain->ops->sva_unbind_gpasid(dev, pasid);
1724 }
1725 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
1726 
1727 static void __iommu_detach_device(struct iommu_domain *domain,
1728 				  struct device *dev)
1729 {
1730 	if ((domain->ops->is_attach_deferred != NULL) &&
1731 	    domain->ops->is_attach_deferred(domain, dev))
1732 		return;
1733 
1734 	if (unlikely(domain->ops->detach_dev == NULL))
1735 		return;
1736 
1737 	domain->ops->detach_dev(domain, dev);
1738 	trace_detach_device_from_domain(dev);
1739 }
1740 
1741 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1742 {
1743 	struct iommu_group *group;
1744 
1745 	group = iommu_group_get(dev);
1746 	if (!group)
1747 		return;
1748 
1749 	mutex_lock(&group->mutex);
1750 	if (iommu_group_device_count(group) != 1) {
1751 		WARN_ON(1);
1752 		goto out_unlock;
1753 	}
1754 
1755 	__iommu_detach_group(domain, group);
1756 
1757 out_unlock:
1758 	mutex_unlock(&group->mutex);
1759 	iommu_group_put(group);
1760 }
1761 EXPORT_SYMBOL_GPL(iommu_detach_device);
1762 
1763 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1764 {
1765 	struct iommu_domain *domain;
1766 	struct iommu_group *group;
1767 
1768 	group = iommu_group_get(dev);
1769 	if (!group)
1770 		return NULL;
1771 
1772 	domain = group->domain;
1773 
1774 	iommu_group_put(group);
1775 
1776 	return domain;
1777 }
1778 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1779 
1780 /*
1781  * For IOMMU_DOMAIN_DMA (DMA API) implementations, which already guarantee
1782  * that the group and its default domain are valid and correct.
1783  */
1784 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1785 {
1786 	return dev->iommu_group->default_domain;
1787 }
1788 
1789 /*
1790  * IOMMU groups are really the natural working unit of the IOMMU, but
1791  * the IOMMU API works on domains and devices.  Bridge that gap by
1792  * iterating over the devices in a group.  Ideally we'd have a single
1793  * device which represents the requestor ID of the group, but we also
1794  * allow IOMMU drivers to create policy defined minimum sets, where
1795  * the physical hardware may be able to distinguish members, but we
1796  * wish to group them at a higher level (e.g. untrusted multi-function
1797  * PCI devices).  Thus we attach each device.
1798  */
1799 static int iommu_group_do_attach_device(struct device *dev, void *data)
1800 {
1801 	struct iommu_domain *domain = data;
1802 
1803 	return __iommu_attach_device(domain, dev);
1804 }
1805 
1806 static int __iommu_attach_group(struct iommu_domain *domain,
1807 				struct iommu_group *group)
1808 {
1809 	int ret;
1810 
1811 	if (group->default_domain && group->domain != group->default_domain)
1812 		return -EBUSY;
1813 
1814 	ret = __iommu_group_for_each_dev(group, domain,
1815 					 iommu_group_do_attach_device);
1816 	if (ret == 0)
1817 		group->domain = domain;
1818 
1819 	return ret;
1820 }
1821 
1822 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1823 {
1824 	int ret;
1825 
1826 	mutex_lock(&group->mutex);
1827 	ret = __iommu_attach_group(domain, group);
1828 	mutex_unlock(&group->mutex);
1829 
1830 	return ret;
1831 }
1832 EXPORT_SYMBOL_GPL(iommu_attach_group);
1833 
1834 static int iommu_group_do_detach_device(struct device *dev, void *data)
1835 {
1836 	struct iommu_domain *domain = data;
1837 
1838 	__iommu_detach_device(domain, dev);
1839 
1840 	return 0;
1841 }
1842 
1843 static void __iommu_detach_group(struct iommu_domain *domain,
1844 				 struct iommu_group *group)
1845 {
1846 	int ret;
1847 
1848 	if (!group->default_domain) {
1849 		__iommu_group_for_each_dev(group, domain,
1850 					   iommu_group_do_detach_device);
1851 		group->domain = NULL;
1852 		return;
1853 	}
1854 
1855 	if (group->domain == group->default_domain)
1856 		return;
1857 
1858 	/* Detach by re-attaching to the default domain */
1859 	ret = __iommu_group_for_each_dev(group, group->default_domain,
1860 					 iommu_group_do_attach_device);
1861 	if (ret != 0)
1862 		WARN_ON(1);
1863 	else
1864 		group->domain = group->default_domain;
1865 }
1866 
1867 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1868 {
1869 	mutex_lock(&group->mutex);
1870 	__iommu_detach_group(domain, group);
1871 	mutex_unlock(&group->mutex);
1872 }
1873 EXPORT_SYMBOL_GPL(iommu_detach_group);
1874 
1875 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1876 {
1877 	if (unlikely(domain->ops->iova_to_phys == NULL))
1878 		return 0;
1879 
1880 	return domain->ops->iova_to_phys(domain, iova);
1881 }
1882 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1883 
1884 static size_t iommu_pgsize(struct iommu_domain *domain,
1885 			   unsigned long addr_merge, size_t size)
1886 {
1887 	unsigned int pgsize_idx;
1888 	size_t pgsize;
1889 
1890 	/* Max page size that still fits into 'size' */
1891 	pgsize_idx = __fls(size);
1892 
1893 	/* need to consider alignment requirements? */
1894 	if (likely(addr_merge)) {
1895 		/* Max page size allowed by address */
1896 		unsigned int align_pgsize_idx = __ffs(addr_merge);
1897 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1898 	}
1899 
1900 	/* build a mask of acceptable page sizes */
1901 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
1902 
1903 	/* throw away page sizes not supported by the hardware */
1904 	pgsize &= domain->pgsize_bitmap;
1905 
1906 	/* make sure we're still sane */
1907 	BUG_ON(!pgsize);
1908 
1909 	/* pick the biggest page */
1910 	pgsize_idx = __fls(pgsize);
1911 	pgsize = 1UL << pgsize_idx;
1912 
1913 	return pgsize;
1914 }
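
/*
 * Worked example (illustrative): with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
 * addr_merge = iova | paddr = 0x3e00000 (2MiB aligned) and size = 0x500000,
 * the size alone would allow a 4MiB step, the address alignment caps it at
 * 2MiB, and masking against the bitmap leaves SZ_4K | SZ_2M, so 2MiB is
 * returned; the loop in __iommu_map() below then maps 2MiB and continues
 * with the remaining 3MiB.
 */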
1915 
1916 int __iommu_map(struct iommu_domain *domain, unsigned long iova,
1917 	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1918 {
1919 	const struct iommu_ops *ops = domain->ops;
1920 	unsigned long orig_iova = iova;
1921 	unsigned int min_pagesz;
1922 	size_t orig_size = size;
1923 	phys_addr_t orig_paddr = paddr;
1924 	int ret = 0;
1925 
1926 	if (unlikely(ops->map == NULL ||
1927 		     domain->pgsize_bitmap == 0UL))
1928 		return -ENODEV;
1929 
1930 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1931 		return -EINVAL;
1932 
1933 	/* find out the minimum page size supported */
1934 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1935 
1936 	/*
1937 	 * both the virtual address and the physical one, as well as
1938 	 * the size of the mapping, must be aligned (at least) to the
1939 	 * size of the smallest page supported by the hardware
1940 	 */
1941 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1942 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1943 		       iova, &paddr, size, min_pagesz);
1944 		return -EINVAL;
1945 	}
1946 
1947 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1948 
1949 	while (size) {
1950 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1951 
1952 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1953 			 iova, &paddr, pgsize);
1954 		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
1955 
1956 		if (ret)
1957 			break;
1958 
1959 		iova += pgsize;
1960 		paddr += pgsize;
1961 		size -= pgsize;
1962 	}
1963 
1964 	if (ops->iotlb_sync_map)
1965 		ops->iotlb_sync_map(domain);
1966 
1967 	/* unroll mapping in case something went wrong */
1968 	if (ret)
1969 		iommu_unmap(domain, orig_iova, orig_size - size);
1970 	else
1971 		trace_map(orig_iova, orig_paddr, orig_size);
1972 
1973 	return ret;
1974 }
1975 
1976 int iommu_map(struct iommu_domain *domain, unsigned long iova,
1977 	      phys_addr_t paddr, size_t size, int prot)
1978 {
1979 	might_sleep();
1980 	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
1981 }
1982 EXPORT_SYMBOL_GPL(iommu_map);
1983 
1984 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
1985 	      phys_addr_t paddr, size_t size, int prot)
1986 {
1987 	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
1988 }
1989 EXPORT_SYMBOL_GPL(iommu_map_atomic);
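
/*
 * Example of mapping a single page with iommu_map() on an unmanaged domain and
 * sanity-checking the translation with iommu_iova_to_phys(). Hypothetical
 * sketch: the example_* name and the caller-chosen IOVA are illustrative only.
 */
static int example_map_page(struct iommu_domain *domain, unsigned long iova,
			    struct page *page)
{
	phys_addr_t paddr = page_to_phys(page);
	int ret;

	/* Process context only: iommu_map() may sleep (GFP_KERNEL) */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* The page-table walk should now resolve the IOVA back to the page */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	return 0;
}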
1990 
1991 static size_t __iommu_unmap(struct iommu_domain *domain,
1992 			    unsigned long iova, size_t size,
1993 			    struct iommu_iotlb_gather *iotlb_gather)
1994 {
1995 	const struct iommu_ops *ops = domain->ops;
1996 	size_t unmapped_page, unmapped = 0;
1997 	unsigned long orig_iova = iova;
1998 	unsigned int min_pagesz;
1999 
2000 	if (unlikely(ops->unmap == NULL ||
2001 		     domain->pgsize_bitmap == 0UL))
2002 		return 0;
2003 
2004 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2005 		return 0;
2006 
2007 	/* find out the minimum page size supported */
2008 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2009 
2010 	/*
2011 	 * The virtual address, as well as the size of the mapping, must be
2012 	 * aligned (at least) to the size of the smallest page supported
2013 	 * by the hardware
2014 	 */
2015 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2016 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2017 		       iova, size, min_pagesz);
2018 		return 0;
2019 	}
2020 
2021 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2022 
2023 	/*
2024 	 * Keep iterating until we either unmap 'size' bytes (or more)
2025 	 * or we hit an area that isn't mapped.
2026 	 */
2027 	while (unmapped < size) {
2028 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2029 
2030 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2031 		if (!unmapped_page)
2032 			break;
2033 
2034 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2035 			 iova, unmapped_page);
2036 
2037 		iova += unmapped_page;
2038 		unmapped += unmapped_page;
2039 	}
2040 
2041 	trace_unmap(orig_iova, size, unmapped);
2042 	return unmapped;
2043 }
2044 
2045 size_t iommu_unmap(struct iommu_domain *domain,
2046 		   unsigned long iova, size_t size)
2047 {
2048 	struct iommu_iotlb_gather iotlb_gather;
2049 	size_t ret;
2050 
2051 	iommu_iotlb_gather_init(&iotlb_gather);
2052 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2053 	iommu_tlb_sync(domain, &iotlb_gather);
2054 
2055 	return ret;
2056 }
2057 EXPORT_SYMBOL_GPL(iommu_unmap);
2058 
2059 size_t iommu_unmap_fast(struct iommu_domain *domain,
2060 			unsigned long iova, size_t size,
2061 			struct iommu_iotlb_gather *iotlb_gather)
2062 {
2063 	return __iommu_unmap(domain, iova, size, iotlb_gather);
2064 }
2065 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
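
/*
 * Example of batching TLB invalidations across several unmaps with
 * iommu_unmap_fast() and a single iommu_tlb_sync(), instead of paying for one
 * sync per call as iommu_unmap() does. Hypothetical sketch; iovas[]/sizes[]
 * and the example_* name are illustrative only.
 */
static size_t example_unmap_batch(struct iommu_domain *domain,
				  const unsigned long *iovas,
				  const size_t *sizes, unsigned int count)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;
	unsigned int i;

	iommu_iotlb_gather_init(&gather);

	for (i = 0; i < count; i++)
		unmapped += iommu_unmap_fast(domain, iovas[i], sizes[i],
					     &gather);

	/* One IOTLB flush for the whole batch */
	iommu_tlb_sync(domain, &gather);

	return unmapped;
}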
2066 
2067 size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2068 		    struct scatterlist *sg, unsigned int nents, int prot,
2069 		    gfp_t gfp)
2070 {
2071 	size_t len = 0, mapped = 0;
2072 	phys_addr_t start;
2073 	unsigned int i = 0;
2074 	int ret;
2075 
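	/*
	 * Note the '<=' below: one extra pass is made after the last element
	 * so that the final run of physically contiguous entries gets mapped
	 * as well.
	 */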
2076 	while (i <= nents) {
2077 		phys_addr_t s_phys = sg_phys(sg);
2078 
2079 		if (len && s_phys != start + len) {
2080 			ret = __iommu_map(domain, iova + mapped, start,
2081 					len, prot, gfp);
2082 
2083 			if (ret)
2084 				goto out_err;
2085 
2086 			mapped += len;
2087 			len = 0;
2088 		}
2089 
2090 		if (len) {
2091 			len += sg->length;
2092 		} else {
2093 			len = sg->length;
2094 			start = s_phys;
2095 		}
2096 
2097 		if (++i < nents)
2098 			sg = sg_next(sg);
2099 	}
2100 
2101 	return mapped;
2102 
2103 out_err:
2104 	/* undo mappings already done */
2105 	iommu_unmap(domain, iova, mapped);
2106 
2107 	return 0;
2109 }
2110 
2111 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2112 		    struct scatterlist *sg, unsigned int nents, int prot)
2113 {
2114 	might_sleep();
2115 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2116 }
2117 EXPORT_SYMBOL_GPL(iommu_map_sg);
2118 
2119 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2120 		    struct scatterlist *sg, unsigned int nents, int prot)
2121 {
2122 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2123 }
2124 EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
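
/*
 * Example of mapping a whole sg_table contiguously in IOVA space with
 * iommu_map_sg(). The helper returns the number of bytes mapped (0 on
 * failure), not an errno, and rolls back partial mappings internally.
 * Hypothetical sketch; the example_* name and the caller-chosen IOVA are
 * illustrative only.
 */
static int example_map_sgtable(struct iommu_domain *domain, unsigned long iova,
			       struct sg_table *sgt)
{
	size_t mapped;

	/* iommu_map_sg() walks sg_phys(), so pass the CPU-side entry count */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}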
2125 
2126 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2127 			       phys_addr_t paddr, u64 size, int prot)
2128 {
2129 	if (unlikely(domain->ops->domain_window_enable == NULL))
2130 		return -ENODEV;
2131 
2132 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2133 						 prot);
2134 }
2135 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2136 
2137 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2138 {
2139 	if (unlikely(domain->ops->domain_window_disable == NULL))
2140 		return;
2141 
2142 	return domain->ops->domain_window_disable(domain, wnd_nr);
2143 }
2144 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2145 
2146 /**
2147  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2148  * @domain: the iommu domain where the fault has happened
2149  * @dev: the device where the fault has happened
2150  * @iova: the faulting address
2151  * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2152  *
2153  * This function should be called by the low-level IOMMU implementations
2154  * whenever IOMMU faults happen, to allow high-level users, that are
2155  * interested in such events, to know about them.
2156  *
2157  * This event may be useful for several possible use cases:
2158  * - mere logging of the event
2159  * - dynamic TLB/PTE loading
2160  * - restarting the faulting device, if required
2161  *
2162  * Returns 0 on success and an appropriate error code otherwise (if dynamic
2163  * PTE/TLB loading will one day be supported, implementations will be able
2164  * to tell whether it succeeded or not according to this return value).
2165  *
2166  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2167  * (though fault handlers can also return -ENOSYS, in case they want to
2168  * elicit the default behavior of the IOMMU drivers).
2169  */
2170 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2171 		       unsigned long iova, int flags)
2172 {
2173 	int ret = -ENOSYS;
2174 
2175 	/*
2176 	 * if upper layers showed interest and installed a fault handler,
2177 	 * invoke it.
2178 	 */
2179 	if (domain->handler)
2180 		ret = domain->handler(domain, dev, iova, flags,
2181 						domain->handler_token);
2182 
2183 	trace_io_page_fault(dev, iova, flags);
2184 	return ret;
2185 }
2186 EXPORT_SYMBOL_GPL(report_iommu_fault);
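
/*
 * Example of the consumer side of report_iommu_fault(): a fault handler
 * installed with iommu_set_fault_handler(). Returning -ENOSYS tells the IOMMU
 * driver to fall back to its default behaviour (usually just logging).
 * Hypothetical sketch; the example_* names are illustrative only.
 */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx (%s)\n", iova,
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read");

	/* Only log; let the IOMMU driver apply its default policy */
	return -ENOSYS;
}

static void example_install_fault_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, example_fault_handler, NULL);
}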
2187 
2188 static int __init iommu_init(void)
2189 {
2190 	iommu_group_kset = kset_create_and_add("iommu_groups",
2191 					       NULL, kernel_kobj);
2192 	BUG_ON(!iommu_group_kset);
2193 
2194 	iommu_debugfs_setup();
2195 
2196 	return 0;
2197 }
2198 core_initcall(iommu_init);
2199 
2200 int iommu_domain_get_attr(struct iommu_domain *domain,
2201 			  enum iommu_attr attr, void *data)
2202 {
2203 	struct iommu_domain_geometry *geometry;
2204 	bool *paging;
2205 	int ret = 0;
2206 
2207 	switch (attr) {
2208 	case DOMAIN_ATTR_GEOMETRY:
2209 		geometry  = data;
2210 		*geometry = domain->geometry;
2211 
2212 		break;
2213 	case DOMAIN_ATTR_PAGING:
2214 		paging  = data;
2215 		*paging = (domain->pgsize_bitmap != 0UL);
2216 		break;
2217 	default:
2218 		if (!domain->ops->domain_get_attr)
2219 			return -EINVAL;
2220 
2221 		ret = domain->ops->domain_get_attr(domain, attr, data);
2222 	}
2223 
2224 	return ret;
2225 }
2226 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2227 
2228 int iommu_domain_set_attr(struct iommu_domain *domain,
2229 			  enum iommu_attr attr, void *data)
2230 {
2231 	int ret = 0;
2232 
2233 	switch (attr) {
2234 	default:
2235 		if (domain->ops->domain_set_attr == NULL)
2236 			return -EINVAL;
2237 
2238 		ret = domain->ops->domain_set_attr(domain, attr, data);
2239 	}
2240 
2241 	return ret;
2242 }
2243 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
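
/*
 * Example of querying the generic domain attributes handled above: the
 * geometry attribute reports the usable IOVA aperture, and DOMAIN_ATTR_PAGING
 * reports whether the domain does page-granular translation at all.
 * Hypothetical sketch; the example_* name is illustrative only.
 */
static void example_print_domain_info(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;
	bool paging = false;

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		pr_info("aperture: %pad..%pad (forced: %d)\n",
			&geo.aperture_start, &geo.aperture_end,
			geo.force_aperture);

	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGING, &paging))
		pr_info("paging: %s\n", paging ? "yes" : "no");
}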
2244 
2245 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2246 {
2247 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2248 
2249 	if (ops && ops->get_resv_regions)
2250 		ops->get_resv_regions(dev, list);
2251 }
2252 
2253 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2254 {
2255 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2256 
2257 	if (ops && ops->put_resv_regions)
2258 		ops->put_resv_regions(dev, list);
2259 }
2260 
2261 /**
2262  * generic_iommu_put_resv_regions - Reserved region driver helper
2263  * @dev: device for which to free reserved regions
2264  * @list: reserved region list for device
2265  *
2266  * IOMMU drivers can use this to implement their .put_resv_regions() callback
2267  * for simple reservations. Memory allocated for each reserved region will be
2268  * freed. An IOMMU driver that allocates additional resources per region must
2269  * implement a custom callback instead.
2270  */
2271 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2272 {
2273 	struct iommu_resv_region *entry, *next;
2274 
2275 	list_for_each_entry_safe(entry, next, list, list)
2276 		kfree(entry);
2277 }
2278 EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2279 
2280 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2281 						  size_t length, int prot,
2282 						  enum iommu_resv_type type)
2283 {
2284 	struct iommu_resv_region *region;
2285 
2286 	region = kzalloc(sizeof(*region), GFP_KERNEL);
2287 	if (!region)
2288 		return NULL;
2289 
2290 	INIT_LIST_HEAD(&region->list);
2291 	region->start = start;
2292 	region->length = length;
2293 	region->prot = prot;
2294 	region->type = type;
2295 	return region;
2296 }
2297 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
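
/*
 * Example of how an IOMMU driver might implement .get_resv_regions() with the
 * helper above, reserving a software MSI window, while reusing
 * generic_iommu_put_resv_regions() as its .put_resv_regions() callback. The
 * base/size values and the example_* name are illustrative assumptions only.
 */
#define EXAMPLE_SW_MSI_BASE	0x08000000UL
#define EXAMPLE_SW_MSI_SIZE	0x00100000UL

static void example_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(EXAMPLE_SW_MSI_BASE,
					 EXAMPLE_SW_MSI_SIZE,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);
}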
2298 
2299 static int
2300 request_default_domain_for_dev(struct device *dev, unsigned long type)
2301 {
2302 	struct iommu_domain *domain;
2303 	struct iommu_group *group;
2304 	int ret;
2305 
2306 	/* Device must already be in a group before calling this function */
2307 	group = iommu_group_get(dev);
2308 	if (!group)
2309 		return -EINVAL;
2310 
2311 	mutex_lock(&group->mutex);
2312 
2313 	ret = 0;
2314 	if (group->default_domain && group->default_domain->type == type)
2315 		goto out;
2316 
2317 	/* Don't change mappings of existing devices */
2318 	ret = -EBUSY;
2319 	if (iommu_group_device_count(group) != 1)
2320 		goto out;
2321 
2322 	ret = -ENOMEM;
2323 	domain = __iommu_domain_alloc(dev->bus, type);
2324 	if (!domain)
2325 		goto out;
2326 
2327 	/* Attach the device to the domain */
2328 	ret = __iommu_attach_group(domain, group);
2329 	if (ret) {
2330 		iommu_domain_free(domain);
2331 		goto out;
2332 	}
2333 
2334 	/* Make the domain the default for this group */
2335 	if (group->default_domain)
2336 		iommu_domain_free(group->default_domain);
2337 	group->default_domain = domain;
2338 
2339 	iommu_group_create_direct_mappings(group, dev);
2340 
2341 	dev_info(dev, "Using iommu %s mapping\n",
2342 		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2343 
2344 	ret = 0;
2345 out:
2346 	mutex_unlock(&group->mutex);
2347 	iommu_group_put(group);
2348 
2349 	return ret;
2350 }
2351 
2352 /* Request that a device be direct mapped (identity mapped) by the IOMMU */
2353 int iommu_request_dm_for_dev(struct device *dev)
2354 {
2355 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2356 }
2357 
2358 /* Request that a device not be direct mapped, i.e. use a translating DMA domain */
2359 int iommu_request_dma_domain_for_dev(struct device *dev)
2360 {
2361 	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2362 }
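
/*
 * Example of a device driver asking for an identity-mapped default domain at
 * probe time, e.g. because the hardware cannot be reprogrammed with IOVAs.
 * This only succeeds while the device is alone in its group and the group is
 * still attached to its default domain. Hypothetical sketch; the example_*
 * name is illustrative only.
 */
static int example_force_direct_mapping(struct device *dev)
{
	int ret = iommu_request_dm_for_dev(dev);

	if (ret)
		dev_warn(dev, "staying with IOMMU translation (%d)\n", ret);

	return ret;
}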
2363 
2364 void iommu_set_default_passthrough(bool cmd_line)
2365 {
2366 	if (cmd_line)
2367 		iommu_set_cmd_line_dma_api();
2368 
2369 	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2370 }
2371 
2372 void iommu_set_default_translated(bool cmd_line)
2373 {
2374 	if (cmd_line)
2375 		iommu_set_cmd_line_dma_api();
2376 
2377 	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2378 }
2379 
2380 bool iommu_default_passthrough(void)
2381 {
2382 	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2383 }
2384 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2385 
2386 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2387 {
2388 	const struct iommu_ops *ops = NULL;
2389 	struct iommu_device *iommu;
2390 
2391 	spin_lock(&iommu_device_lock);
2392 	list_for_each_entry(iommu, &iommu_device_list, list)
2393 		if (iommu->fwnode == fwnode) {
2394 			ops = iommu->ops;
2395 			break;
2396 		}
2397 	spin_unlock(&iommu_device_lock);
2398 	return ops;
2399 }
2400 
2401 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2402 		      const struct iommu_ops *ops)
2403 {
2404 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2405 
2406 	if (fwspec)
2407 		return ops == fwspec->ops ? 0 : -EINVAL;
2408 
2409 	if (!dev_iommu_get(dev))
2410 		return -ENOMEM;
2411 
2412 	/* Preallocate for the overwhelmingly common case of 1 ID */
2413 	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2414 	if (!fwspec)
2415 		return -ENOMEM;
2416 
2417 	of_node_get(to_of_node(iommu_fwnode));
2418 	fwspec->iommu_fwnode = iommu_fwnode;
2419 	fwspec->ops = ops;
2420 	dev_iommu_fwspec_set(dev, fwspec);
2421 	return 0;
2422 }
2423 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2424 
2425 void iommu_fwspec_free(struct device *dev)
2426 {
2427 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2428 
2429 	if (fwspec) {
2430 		fwnode_handle_put(fwspec->iommu_fwnode);
2431 		kfree(fwspec);
2432 		dev_iommu_fwspec_set(dev, NULL);
2433 	}
2434 }
2435 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2436 
2437 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2438 {
2439 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2440 	int i, new_num;
2441 
2442 	if (!fwspec)
2443 		return -EINVAL;
2444 
2445 	new_num = fwspec->num_ids + num_ids;
2446 	if (new_num > 1) {
2447 		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2448 				  GFP_KERNEL);
2449 		if (!fwspec)
2450 			return -ENOMEM;
2451 
2452 		dev_iommu_fwspec_set(dev, fwspec);
2453 	}
2454 
2455 	for (i = 0; i < num_ids; i++)
2456 		fwspec->ids[fwspec->num_ids + i] = ids[i];
2457 
2458 	fwspec->num_ids = new_num;
2459 	return 0;
2460 }
2461 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
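
/*
 * Example of how firmware parsing code (OF/ACPI glue) typically uses the
 * fwspec helpers above: bind the device to its IOMMU instance, then record
 * the stream/device ID found in the firmware tables. Hypothetical sketch;
 * the example_* name and the single ID are illustrative only.
 */
static int example_fwspec_setup(struct device *dev,
				struct fwnode_handle *iommu_fwnode,
				const struct iommu_ops *ops, u32 sid)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	ret = iommu_fwspec_add_ids(dev, &sid, 1);
	if (ret)
		iommu_fwspec_free(dev);

	return ret;
}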
2462 
2463 /*
2464  * Per device IOMMU features.
2465  */
2466 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2467 {
2468 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2469 
2470 	if (ops && ops->dev_has_feat)
2471 		return ops->dev_has_feat(dev, feat);
2472 
2473 	return false;
2474 }
2475 EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2476 
2477 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2478 {
2479 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2480 
2481 	if (ops && ops->dev_enable_feat)
2482 		return ops->dev_enable_feat(dev, feat);
2483 
2484 	return -ENODEV;
2485 }
2486 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2487 
2488 /*
2489  * The device drivers should do the necessary cleanups before calling this.
2490  * For example, before disabling the aux-domain feature, the device driver
2491  * should detach all aux-domains. Otherwise, this will return -EBUSY.
2492  */
2493 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2494 {
2495 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2496 
2497 	if (ops && ops->dev_disable_feat)
2498 		return ops->dev_disable_feat(dev, feat);
2499 
2500 	return -EBUSY;
2501 }
2502 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2503 
2504 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2505 {
2506 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2507 
2508 	if (ops && ops->dev_feat_enabled)
2509 		return ops->dev_feat_enabled(dev, feat);
2510 
2511 	return false;
2512 }
2513 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
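
/*
 * Example of a device driver probing for and enabling a per-device IOMMU
 * feature before relying on it, here IOMMU_DEV_FEAT_SVA. Hypothetical sketch;
 * the example_* name is illustrative only.
 */
static int example_enable_sva(struct device *dev)
{
	int ret;

	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_SVA))
		return 0;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		dev_dbg(dev, "SVA not available (%d)\n", ret);

	return ret;
}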
2514 
2515 /*
2516  * Aux-domain specific attach/detach.
2517  *
2518  * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2519  * true. Also, as long as domains are attached to a device through this
2520  * interface, any attempt to call iommu_attach_device() should fail
2521  * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2522  * This should make us safe against a device being attached to a guest as a
2523  * whole while there are still pasid users on it (aux and sva).
2524  */
2525 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2526 {
2527 	int ret = -ENODEV;
2528 
2529 	if (domain->ops->aux_attach_dev)
2530 		ret = domain->ops->aux_attach_dev(domain, dev);
2531 
2532 	if (!ret)
2533 		trace_attach_device_to_domain(dev);
2534 
2535 	return ret;
2536 }
2537 EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2538 
2539 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2540 {
2541 	if (domain->ops->aux_detach_dev) {
2542 		domain->ops->aux_detach_dev(domain, dev);
2543 		trace_detach_device_from_domain(dev);
2544 	}
2545 }
2546 EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2547 
2548 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2549 {
2550 	int ret = -ENODEV;
2551 
2552 	if (domain->ops->aux_get_pasid)
2553 		ret = domain->ops->aux_get_pasid(domain, dev);
2554 
2555 	return ret;
2556 }
2557 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
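
/*
 * Example of the aux-domain flow described above: attach an extra domain to a
 * device, retrieve the PASID the IOMMU assigned to it (so the driver can tag
 * its DMA with it), and detach again on error. Hypothetical sketch; the
 * example_* name is illustrative only.
 */
static int example_aux_attach(struct iommu_domain *aux_domain,
			      struct device *dev, int *pasid)
{
	int ret;

	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
		return -ENODEV;

	ret = iommu_aux_attach_device(aux_domain, dev);
	if (ret)
		return ret;

	*pasid = iommu_aux_get_pasid(aux_domain, dev);
	if (*pasid < 0) {
		iommu_aux_detach_device(aux_domain, dev);
		return *pasid;
	}

	return 0;
}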
2558 
2559 /**
2560  * iommu_sva_bind_device() - Bind a process address space to a device
2561  * @dev: the device
2562  * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer passed through to the IOMMU driver's sva_bind() callback
2563  *
2564  * Create a bond between device and address space, allowing the device to access
2565  * the mm using the returned PASID. If a bond already exists between @dev and
2566  * @mm, it is returned and an additional reference is taken. Caller must call
2567  * iommu_sva_unbind_device() to release each reference.
2568  *
2569  * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2570  * initialize the required SVA features.
2571  *
2572  * On error, returns an ERR_PTR value.
2573  */
2574 struct iommu_sva *
2575 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2576 {
2577 	struct iommu_group *group;
2578 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
2579 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2580 
2581 	if (!ops || !ops->sva_bind)
2582 		return ERR_PTR(-ENODEV);
2583 
2584 	group = iommu_group_get(dev);
2585 	if (!group)
2586 		return ERR_PTR(-ENODEV);
2587 
2588 	/* Ensure device count and domain don't change while we're binding */
2589 	mutex_lock(&group->mutex);
2590 
2591 	/*
2592 	 * To keep things simple, SVA currently doesn't support IOMMU groups
2593 	 * with more than one device. Existing SVA-capable systems are not
2594 	 * affected by the problems that required IOMMU groups (lack of ACS
2595 	 * isolation, device ID aliasing and other hardware issues).
2596 	 */
2597 	if (iommu_group_device_count(group) != 1)
2598 		goto out_unlock;
2599 
2600 	handle = ops->sva_bind(dev, mm, drvdata);
2601 
2602 out_unlock:
2603 	mutex_unlock(&group->mutex);
2604 	iommu_group_put(group);
2605 
2606 	return handle;
2607 }
2608 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2609 
2610 /**
2611  * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2612  * @handle: the handle returned by iommu_sva_bind_device()
2613  *
2614  * Put a reference to a bond between device and address space. The device
2615  * should not issue any more transactions for this PASID. All outstanding page
2616  * requests for this PASID must have been flushed to the IOMMU.
2619  */
2620 void iommu_sva_unbind_device(struct iommu_sva *handle)
2621 {
2622 	struct iommu_group *group;
2623 	struct device *dev = handle->dev;
2624 	const struct iommu_ops *ops = dev->bus->iommu_ops;
2625 
2626 	if (!ops || !ops->sva_unbind)
2627 		return;
2628 
2629 	group = iommu_group_get(dev);
2630 	if (!group)
2631 		return;
2632 
2633 	mutex_lock(&group->mutex);
2634 	ops->sva_unbind(handle);
2635 	mutex_unlock(&group->mutex);
2636 
2637 	iommu_group_put(group);
2638 }
2639 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2640 
2641 int iommu_sva_set_ops(struct iommu_sva *handle,
2642 		      const struct iommu_sva_ops *sva_ops)
2643 {
2644 	if (handle->ops && handle->ops != sva_ops)
2645 		return -EEXIST;
2646 
2647 	handle->ops = sva_ops;
2648 	return 0;
2649 }
2650 EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2651 
2652 int iommu_sva_get_pasid(struct iommu_sva *handle)
2653 {
2654 	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2655 
2656 	if (!ops || !ops->sva_get_pasid)
2657 		return IOMMU_PASID_INVALID;
2658 
2659 	return ops->sva_get_pasid(handle);
2660 }
2661 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
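
/*
 * Example of the SVA flow built from the helpers above: bind the current
 * process' address space to the device, fetch the PASID to program into the
 * hardware, and drop the bond again on failure. Hypothetical sketch; the
 * example_* name is illustrative, and the caller is assumed to be in process
 * context with a live reference to current->mm.
 */
static struct iommu_sva *example_sva_bind_current(struct device *dev, int *pasid)
{
	struct iommu_sva *handle;

	/* Requires iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) beforehand */
	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return handle;

	*pasid = iommu_sva_get_pasid(handle);
	if (*pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return ERR_PTR(-ENODEV);
	}

	return handle;
}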
2662