/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}
/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type.  If it overlaps another region of the
 * same type, the regions are merged.  If it overlaps a region of
 * a different type, the regions are not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}
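
/*
 * A short worked example of the merge rule above, with hypothetical
 * addresses not taken from any platform: an existing IOMMU_RESV_DIRECT
 * region covering [0x1000, 0x2fff] and a new IOMMU_RESV_DIRECT region
 * covering [0x2000, 0x3fff] overlap and share a type, so the list entry
 * is replaced by one region spanning [0x1000, 0x3fff]:
 *
 *	LIST_HEAD(regions);
 *	struct iommu_resv_region *a, *b;
 *
 *	a = iommu_alloc_resv_region(0x1000, 0x2000, IOMMU_READ | IOMMU_WRITE,
 *				    IOMMU_RESV_DIRECT);
 *	b = iommu_alloc_resv_region(0x2000, 0x2000, IOMMU_READ | IOMMU_WRITE,
 *				    IOMMU_RESV_DIRECT);
 *	iommu_insert_resv_region(a, &regions);	// list: [0x1000, 0x2fff]
 *	iommu_insert_resv_region(b, &regions);	// list: [0x1000, 0x3fff]
 *
 * Note that the list receives copies; the caller still owns 'a' and 'b'.
 * Had 'b' been of a different type (e.g. IOMMU_RESV_MSI), both regions
 * would remain on the list, sorted by start address.
 */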

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
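
/*
 * A minimal sketch of the expected allocation pattern, as it might appear
 * in an iommu driver (the device pointer, the name and the error handling
 * are assumptions for illustration, not taken from a real driver):
 *
 *	struct iommu_group *grp = iommu_group_alloc();
 *
 *	if (IS_ERR(grp))
 *		return PTR_ERR(grp);
 *	iommu_group_set_name(grp, "my-hw-unit");
 *	ret = iommu_group_add_device(grp, dev);
 *	iommu_group_put(grp);	// drop the allocation reference
 *
 * The device added above keeps the group alive; the final
 * iommu_group_remove_device() drops the last reference and triggers
 * iommu_group_release().
 */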

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
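
/*
 * A brief sketch of the callback pattern (the counting callback here is
 * hypothetical, not an API in this file).  The return convention matters:
 * a non-zero return stops the iteration and is propagated to the caller.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 *
 * Because group->mutex is held across callbacks, the callback must not
 * call back into iommu_group_add_device() or iommu_group_remove_device().
 */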

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
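
/*
 * The usual lookup pattern, sketched for illustration (error handling
 * elided): every successful iommu_group_get() must be balanced by an
 * iommu_group_put() once the caller is done with the group.
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		pr_info("device %s is in group %d\n",
 *			dev_name(dev), iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */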

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search space
 * is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex
 * or downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause
 * a loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
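
/*
 * Registration as it would typically appear in a driver's init path.
 * my_iommu_ops and its callbacks are stand-ins for a driver's populated
 * struct iommu_ops, not real symbols; the pgsize_bitmap value below is
 * one example (each set bit advertises a supported page size):
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	err = bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *
 * bus_set_iommu() then walks every device already on the bus through
 * ->add_device() and registers a notifier for future hotplug.
 */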

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
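
/*
 * A minimal fault handler sketch; the handler name and the logging are
 * illustrative only.  Per iommu_fault_handler_t, it receives the faulting
 * domain, device, iova and fault flags, plus the token registered below:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n",
 *			iova, flags);
 *		return -ENOSYS;	// not handled, fall back to default report
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */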

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy-defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
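
/*
 * A worked example with hypothetical values: for iova = 0x201000,
 * paddr = 0x401000, size = 0x403000 and a pgsize_bitmap of SZ_4K | SZ_2M,
 * addr_merge = iova | paddr has its lowest set bit at bit 12, so the
 * first call returns SZ_4K even though 'size' alone would permit up to
 * 4M.  After 0x1ff000 bytes of 4K mappings both addresses are 2M-aligned,
 * and the next call returns SZ_2M for the bulk of the remaining range.
 */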

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
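
/*
 * End-to-end usage as a consumer such as VFIO would perform it, sketched
 * with made-up addresses and an assumed page 'pg' (error handling
 * abbreviated):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	ret = iommu_attach_group(domain, group);
 *	if (!ret)
 *		ret = iommu_map(domain, 0x100000, page_to_phys(pg),
 *				SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x100000, SZ_4K);
 *	iommu_detach_group(domain, group);
 *	iommu_domain_free(domain);
 */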

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
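
/*
 * Querying the aperture of a domain, for example, goes through
 * DOMAIN_ATTR_GEOMETRY (a sketch; aperture_start/aperture_end are the
 * dma_addr_t fields of struct iommu_domain_geometry):
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */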

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev->iommu_fwspec = fwspec;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
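
/*
 * Firmware code (e.g. the OF or ACPI IORT glue) builds up a device's
 * fwspec in two steps, roughly like the sketch below; the fwnode pointer
 * and the stream ID value are placeholders:
 *
 *	u32 sid = 0x42;
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *	if (ret)
 *		iommu_fwspec_free(dev);
 *
 * iommu_fwspec_add_ids() may be called repeatedly; the ids[] array is
 * krealloc()ed as needed and num_ids tracks the total.
 */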
1889