xref: /openbmc/linux/drivers/iommu/iommu.c (revision c0e297dc)
1 /*
2  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3  * Author: Joerg Roedel <jroedel@suse.de>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published
7  * by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
17  */
18 
19 #define pr_fmt(fmt)    "iommu: " fmt
20 
21 #include <linux/device.h>
22 #include <linux/kernel.h>
23 #include <linux/bug.h>
24 #include <linux/types.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/iommu.h>
29 #include <linux/idr.h>
30 #include <linux/notifier.h>
31 #include <linux/err.h>
32 #include <linux/pci.h>
33 #include <linux/bitops.h>
34 #include <trace/events/iommu.h>
35 
36 static struct kset *iommu_group_kset;
37 static struct ida iommu_group_ida;
38 static struct mutex iommu_group_mutex;
39 
40 struct iommu_callback_data {
41 	const struct iommu_ops *ops;
42 };
43 
44 struct iommu_group {
45 	struct kobject kobj;
46 	struct kobject *devices_kobj;
47 	struct list_head devices;
48 	struct mutex mutex;
49 	struct blocking_notifier_head notifier;
50 	void *iommu_data;
51 	void (*iommu_data_release)(void *iommu_data);
52 	char *name;
53 	int id;
54 	struct iommu_domain *default_domain;
55 	struct iommu_domain *domain;
56 };
57 
58 struct iommu_device {
59 	struct list_head list;
60 	struct device *dev;
61 	char *name;
62 };
63 
64 struct iommu_group_attribute {
65 	struct attribute attr;
66 	ssize_t (*show)(struct iommu_group *group, char *buf);
67 	ssize_t (*store)(struct iommu_group *group,
68 			 const char *buf, size_t count);
69 };
70 
71 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
72 struct iommu_group_attribute iommu_group_attr_##_name =		\
73 	__ATTR(_name, _mode, _show, _store)
74 
75 #define to_iommu_group_attr(_attr)	\
76 	container_of(_attr, struct iommu_group_attribute, attr)
77 #define to_iommu_group(_kobj)		\
78 	container_of(_kobj, struct iommu_group, kobj)
79 
80 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
81 						 unsigned type);
82 static int __iommu_attach_device(struct iommu_domain *domain,
83 				 struct device *dev);
84 static int __iommu_attach_group(struct iommu_domain *domain,
85 				struct iommu_group *group);
86 static void __iommu_detach_group(struct iommu_domain *domain,
87 				 struct iommu_group *group);
88 
89 static ssize_t iommu_group_attr_show(struct kobject *kobj,
90 				     struct attribute *__attr, char *buf)
91 {
92 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
93 	struct iommu_group *group = to_iommu_group(kobj);
94 	ssize_t ret = -EIO;
95 
96 	if (attr->show)
97 		ret = attr->show(group, buf);
98 	return ret;
99 }
100 
101 static ssize_t iommu_group_attr_store(struct kobject *kobj,
102 				      struct attribute *__attr,
103 				      const char *buf, size_t count)
104 {
105 	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
106 	struct iommu_group *group = to_iommu_group(kobj);
107 	ssize_t ret = -EIO;
108 
109 	if (attr->store)
110 		ret = attr->store(group, buf, count);
111 	return ret;
112 }
113 
114 static const struct sysfs_ops iommu_group_sysfs_ops = {
115 	.show = iommu_group_attr_show,
116 	.store = iommu_group_attr_store,
117 };
118 
119 static int iommu_group_create_file(struct iommu_group *group,
120 				   struct iommu_group_attribute *attr)
121 {
122 	return sysfs_create_file(&group->kobj, &attr->attr);
123 }
124 
125 static void iommu_group_remove_file(struct iommu_group *group,
126 				    struct iommu_group_attribute *attr)
127 {
128 	sysfs_remove_file(&group->kobj, &attr->attr);
129 }
130 
131 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
132 {
133 	return sprintf(buf, "%s\n", group->name);
134 }
135 
136 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
137 
138 static void iommu_group_release(struct kobject *kobj)
139 {
140 	struct iommu_group *group = to_iommu_group(kobj);
141 
142 	pr_debug("Releasing group %d\n", group->id);
143 
144 	if (group->iommu_data_release)
145 		group->iommu_data_release(group->iommu_data);
146 
147 	mutex_lock(&iommu_group_mutex);
148 	ida_remove(&iommu_group_ida, group->id);
149 	mutex_unlock(&iommu_group_mutex);
150 
151 	if (group->default_domain)
152 		iommu_domain_free(group->default_domain);
153 
154 	kfree(group->name);
155 	kfree(group);
156 }
157 
158 static struct kobj_type iommu_group_ktype = {
159 	.sysfs_ops = &iommu_group_sysfs_ops,
160 	.release = iommu_group_release,
161 };
162 
163 /**
164  * iommu_group_alloc - Allocate a new group
165  *
167  * This function is called by an iommu driver to allocate a new iommu
168  * group.  The iommu group represents the minimum granularity of the iommu.
169  * Upon successful return, the caller holds a reference to the returned
170  * group in order to hold the group until devices are added.  Use
171  * iommu_group_put() to release this extra reference count, allowing the
172  * group to be automatically reclaimed once it has no devices or external
173  * references.
174  */
175 struct iommu_group *iommu_group_alloc(void)
176 {
177 	struct iommu_group *group;
178 	int ret;
179 
180 	group = kzalloc(sizeof(*group), GFP_KERNEL);
181 	if (!group)
182 		return ERR_PTR(-ENOMEM);
183 
184 	group->kobj.kset = iommu_group_kset;
185 	mutex_init(&group->mutex);
186 	INIT_LIST_HEAD(&group->devices);
187 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
188 
189 	mutex_lock(&iommu_group_mutex);
190 
191 again:
192 	if (unlikely(!ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
193 		kfree(group);
194 		mutex_unlock(&iommu_group_mutex);
195 		return ERR_PTR(-ENOMEM);
196 	}
197 
198 	if (ida_get_new(&iommu_group_ida, &group->id) == -EAGAIN)
199 		goto again;
200 
201 	mutex_unlock(&iommu_group_mutex);
202 
203 	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
204 				   NULL, "%d", group->id);
205 	if (ret) {
206 		/* .release removes the ida entry and frees the group */
207 		kobject_put(&group->kobj);
208 		return ERR_PTR(ret);
209 	}
212 
213 	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
214 	if (!group->devices_kobj) {
215 		kobject_put(&group->kobj); /* triggers .release & free */
216 		return ERR_PTR(-ENOMEM);
217 	}
218 
219 	/*
220 	 * The devices_kobj holds a reference on the group kobject, so
221 	 * as long as that exists so will the group.  We can therefore
222 	 * use the devices_kobj for reference counting.
223 	 */
224 	kobject_put(&group->kobj);
225 
226 	pr_debug("Allocated group %d\n", group->id);
227 
228 	return group;
229 }
230 EXPORT_SYMBOL_GPL(iommu_group_alloc);
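
/*
 * Illustrative sketch (not part of the original file): a typical IOMMU
 * driver pairs iommu_group_alloc() with iommu_group_add_device() and then
 * drops its allocation reference.  "mydev" is a hypothetical struct
 * device pointer.
 *
 *	struct iommu_group *grp = iommu_group_alloc();
 *
 *	if (IS_ERR(grp))
 *		return PTR_ERR(grp);
 *	if (iommu_group_add_device(grp, mydev))
 *		pr_warn("could not add %s\n", dev_name(mydev));
 *	iommu_group_put(grp);
 */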
231 
232 struct iommu_group *iommu_group_get_by_id(int id)
233 {
234 	struct kobject *group_kobj;
235 	struct iommu_group *group;
236 	const char *name;
237 
238 	if (!iommu_group_kset)
239 		return NULL;
240 
241 	name = kasprintf(GFP_KERNEL, "%d", id);
242 	if (!name)
243 		return NULL;
244 
245 	group_kobj = kset_find_obj(iommu_group_kset, name);
246 	kfree(name);
247 
248 	if (!group_kobj)
249 		return NULL;
250 
251 	group = container_of(group_kobj, struct iommu_group, kobj);
252 	BUG_ON(group->id != id);
253 
254 	kobject_get(group->devices_kobj);
255 	kobject_put(&group->kobj);
256 
257 	return group;
258 }
259 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
260 
261 /**
262  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
263  * @group: the group
264  *
265  * iommu drivers can store data in the group for use when doing iommu
266  * operations.  This function provides a way to retrieve it.  Caller
267  * should hold a group reference.
268  */
269 void *iommu_group_get_iommudata(struct iommu_group *group)
270 {
271 	return group->iommu_data;
272 }
273 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
274 
275 /**
276  * iommu_group_set_iommudata - set iommu_data for a group
277  * @group: the group
278  * @iommu_data: new data
279  * @release: release function for iommu_data
280  *
281  * iommu drivers can store data in the group for use when doing iommu
282  * operations.  This function provides a way to set the data after
283  * the group has been allocated.  Caller should hold a group reference.
284  */
285 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
286 			       void (*release)(void *iommu_data))
287 {
288 	group->iommu_data = iommu_data;
289 	group->iommu_data_release = release;
290 }
291 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
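
/*
 * Illustrative sketch (assumed usage, not original code): a driver can
 * hang private bookkeeping off the group and have it freed when the group
 * is released.  "struct my_group_info" is hypothetical.
 *
 *	static void my_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	info = kzalloc(sizeof(struct my_group_info), GFP_KERNEL);
 *	if (info)
 *		iommu_group_set_iommudata(group, info, my_release);
 */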
292 
293 /**
294  * iommu_group_set_name - set name for a group
295  * @group: the group
296  * @name: name
297  *
298  * Allow iommu driver to set a name for a group.  When set it will
299  * appear in a name attribute file under the group in sysfs.
300  */
301 int iommu_group_set_name(struct iommu_group *group, const char *name)
302 {
303 	int ret;
304 
305 	if (group->name) {
306 		iommu_group_remove_file(group, &iommu_group_attr_name);
307 		kfree(group->name);
308 		group->name = NULL;
309 		if (!name)
310 			return 0;
311 	}
312 
313 	group->name = kstrdup(name, GFP_KERNEL);
314 	if (!group->name)
315 		return -ENOMEM;
316 
317 	ret = iommu_group_create_file(group, &iommu_group_attr_name);
318 	if (ret) {
319 		kfree(group->name);
320 		group->name = NULL;
321 		return ret;
322 	}
323 
324 	return 0;
325 }
326 EXPORT_SYMBOL_GPL(iommu_group_set_name);
327 
328 static int iommu_group_create_direct_mappings(struct iommu_group *group,
329 					      struct device *dev)
330 {
331 	struct iommu_domain *domain = group->default_domain;
332 	struct iommu_dm_region *entry;
333 	struct list_head mappings;
334 	unsigned long pg_size;
335 	int ret = 0;
336 
337 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
338 		return 0;
339 
340 	BUG_ON(!domain->ops->pgsize_bitmap);
341 
342 	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
343 	INIT_LIST_HEAD(&mappings);
344 
345 	iommu_get_dm_regions(dev, &mappings);
346 
347 	/* We need to consider overlapping regions for different devices */
348 	list_for_each_entry(entry, &mappings, list) {
349 		dma_addr_t start, end, addr;
350 
351 		start = ALIGN(entry->start, pg_size);
352 		end   = ALIGN(entry->start + entry->length, pg_size);
353 
354 		for (addr = start; addr < end; addr += pg_size) {
355 			phys_addr_t phys_addr;
356 
357 			phys_addr = iommu_iova_to_phys(domain, addr);
358 			if (phys_addr)
359 				continue;
360 
361 			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
362 			if (ret)
363 				goto out;
364 		}
365 
366 	}
367 
368 out:
369 	iommu_put_dm_regions(dev, &mappings);
370 
371 	return ret;
372 }
373 
374 /**
375  * iommu_group_add_device - add a device to an iommu group
376  * @group: the group into which to add the device (reference should be held)
377  * @dev: the device
378  *
379  * This function is called by an iommu driver to add a device into a
380  * group.  Adding a device increments the group reference count.
381  */
382 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
383 {
384 	int ret, i = 0;
385 	struct iommu_device *device;
386 
387 	device = kzalloc(sizeof(*device), GFP_KERNEL);
388 	if (!device)
389 		return -ENOMEM;
390 
391 	device->dev = dev;
392 
393 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
394 	if (ret) {
395 		kfree(device);
396 		return ret;
397 	}
398 
399 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
400 rename:
401 	if (!device->name) {
402 		sysfs_remove_link(&dev->kobj, "iommu_group");
403 		kfree(device);
404 		return -ENOMEM;
405 	}
406 
407 	ret = sysfs_create_link_nowarn(group->devices_kobj,
408 				       &dev->kobj, device->name);
409 	if (ret) {
410 		kfree(device->name);
411 		if (ret == -EEXIST && i >= 0) {
412 			/*
413 			 * Account for the slim chance of collision
414 			 * and append an instance to the name.
415 			 */
416 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
417 						 kobject_name(&dev->kobj), i++);
418 			goto rename;
419 		}
420 
421 		sysfs_remove_link(&dev->kobj, "iommu_group");
422 		kfree(device);
423 		return ret;
424 	}
425 
426 	kobject_get(group->devices_kobj);
427 
428 	dev->iommu_group = group;
429 
430 	iommu_group_create_direct_mappings(group, dev);
431 
432 	mutex_lock(&group->mutex);
433 	list_add_tail(&device->list, &group->devices);
434 	if (group->domain)
435 		__iommu_attach_device(group->domain, dev);
436 	mutex_unlock(&group->mutex);
437 
438 	/* Notify any listeners about change to group. */
439 	blocking_notifier_call_chain(&group->notifier,
440 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
441 
442 	trace_add_device_to_group(group->id, dev);
443 
444 	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
445 
446 	return 0;
447 }
448 EXPORT_SYMBOL_GPL(iommu_group_add_device);
449 
450 /**
451  * iommu_group_remove_device - remove a device from its current group
452  * @dev: device to be removed
453  *
454  * This function is called by an iommu driver to remove the device from
455  * its current group.  This decrements the iommu group reference count.
456  */
457 void iommu_group_remove_device(struct device *dev)
458 {
459 	struct iommu_group *group = dev->iommu_group;
460 	struct iommu_device *tmp_device, *device = NULL;
461 
462 	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
463 
464 	/* Pre-notify listeners that a device is being removed. */
465 	blocking_notifier_call_chain(&group->notifier,
466 				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
467 
468 	mutex_lock(&group->mutex);
469 	list_for_each_entry(tmp_device, &group->devices, list) {
470 		if (tmp_device->dev == dev) {
471 			device = tmp_device;
472 			list_del(&device->list);
473 			break;
474 		}
475 	}
476 	mutex_unlock(&group->mutex);
477 
478 	if (!device)
479 		return;
480 
481 	sysfs_remove_link(group->devices_kobj, device->name);
482 	sysfs_remove_link(&dev->kobj, "iommu_group");
483 
484 	trace_remove_device_from_group(group->id, dev);
485 
486 	kfree(device->name);
487 	kfree(device);
488 	dev->iommu_group = NULL;
489 	kobject_put(group->devices_kobj);
490 }
491 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
492 
493 static int iommu_group_device_count(struct iommu_group *group)
494 {
495 	struct iommu_device *entry;
496 	int ret = 0;
497 
498 	list_for_each_entry(entry, &group->devices, list)
499 		ret++;
500 
501 	return ret;
502 }
503 
504 /**
505  * iommu_group_for_each_dev - iterate over each device in the group
506  * @group: the group
507  * @data: caller opaque data to be passed to callback function
508  * @fn: caller supplied callback function
509  *
510  * This function is called by group users to iterate over group devices.
511  * Callers should hold a reference count to the group during callback.
512  * The group->mutex is held across callbacks, which will block calls to
513  * iommu_group_add/remove_device.
514  */
515 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
516 				      int (*fn)(struct device *, void *))
517 {
518 	struct iommu_device *device;
519 	int ret = 0;
520 
521 	list_for_each_entry(device, &group->devices, list) {
522 		ret = fn(device->dev, data);
523 		if (ret)
524 			break;
525 	}
526 	return ret;
527 }
528 
530 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
531 			     int (*fn)(struct device *, void *))
532 {
533 	int ret;
534 
535 	mutex_lock(&group->mutex);
536 	ret = __iommu_group_for_each_dev(group, data, fn);
537 	mutex_unlock(&group->mutex);
538 
539 	return ret;
540 }
541 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
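
/*
 * Illustrative sketch (assumed, not from the original source): counting
 * the devices of a group with a caller-supplied callback.  Returning
 * nonzero from the callback stops the iteration.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */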
542 
543 /**
544  * iommu_group_get - Return the group for a device and increment reference
545  * @dev: get the group that this device belongs to
546  *
547  * This function is called by iommu drivers and users to get the group
548  * for the specified device.  If found, the group is returned and the group
549  * reference is incremented; otherwise NULL is returned.
550  */
551 struct iommu_group *iommu_group_get(struct device *dev)
552 {
553 	struct iommu_group *group = dev->iommu_group;
554 
555 	if (group)
556 		kobject_get(group->devices_kobj);
557 
558 	return group;
559 }
560 EXPORT_SYMBOL_GPL(iommu_group_get);
561 
562 /**
563  * iommu_group_put - Decrement group reference
564  * @group: the group to use
565  *
566  * This function is called by iommu drivers and users to release the
567  * iommu group.  Once the reference count is zero, the group is released.
568  */
569 void iommu_group_put(struct iommu_group *group)
570 {
571 	if (group)
572 		kobject_put(group->devices_kobj);
573 }
574 EXPORT_SYMBOL_GPL(iommu_group_put);
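
/*
 * Illustrative sketch (not part of the original file): the usual get/put
 * pattern around a group lookup.
 *
 *	struct iommu_group *grp = iommu_group_get(dev);
 *
 *	if (grp) {
 *		pr_info("%s is in group %d\n", dev_name(dev),
 *			iommu_group_id(grp));
 *		iommu_group_put(grp);
 *	}
 */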
575 
576 /**
577  * iommu_group_register_notifier - Register a notifier for group changes
578  * @group: the group to watch
579  * @nb: notifier block to signal
580  *
581  * This function allows iommu group users to track changes in a group.
582  * See include/linux/iommu.h for actions sent via this notifier.  Caller
583  * should hold a reference to the group throughout notifier registration.
584  */
585 int iommu_group_register_notifier(struct iommu_group *group,
586 				  struct notifier_block *nb)
587 {
588 	return blocking_notifier_chain_register(&group->notifier, nb);
589 }
590 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
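
/*
 * Illustrative sketch (assumed, not original code): a minimal listener
 * for group events; the IOMMU_GROUP_NOTIFY_* actions are defined in
 * include/linux/iommu.h.
 *
 *	static int my_notify(struct notifier_block *nb,
 *			     unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined an iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */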
591 
592 /**
593  * iommu_group_unregister_notifier - Unregister a notifier
594  * @group: the group to watch
595  * @nb: notifier block to signal
596  *
597  * Unregister a previously registered group notifier block.
598  */
599 int iommu_group_unregister_notifier(struct iommu_group *group,
600 				    struct notifier_block *nb)
601 {
602 	return blocking_notifier_chain_unregister(&group->notifier, nb);
603 }
604 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
605 
606 /**
607  * iommu_group_id - Return ID for a group
608  * @group: the group to ID
609  *
610  * Return the unique ID for the group matching the sysfs group number.
611  */
612 int iommu_group_id(struct iommu_group *group)
613 {
614 	return group->id;
615 }
616 EXPORT_SYMBOL_GPL(iommu_group_id);
617 
618 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
619 					       unsigned long *devfns);
620 
621 /*
622  * To consider a PCI device isolated, we require ACS to support Source
623  * Validation, Request Redirection, Completer Redirection, and Upstream
624  * Forwarding.  This effectively means that devices cannot spoof their
625  * requester ID, requests and completions cannot be redirected, and all
626  * transactions are forwarded upstream, even when they pass through a
627  * bridge where the target device is downstream.
628  */
629 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
630 
631 /*
632  * For multifunction devices which are not isolated from each other, find
633  * all the other non-isolated functions and look for existing groups.  For
634  * each function, we also need to look for aliases to or from other devices
635  * that may already have a group.
636  */
637 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
638 							unsigned long *devfns)
639 {
640 	struct pci_dev *tmp = NULL;
641 	struct iommu_group *group;
642 
643 	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
644 		return NULL;
645 
646 	for_each_pci_dev(tmp) {
647 		if (tmp == pdev || tmp->bus != pdev->bus ||
648 		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
649 		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
650 			continue;
651 
652 		group = get_pci_alias_group(tmp, devfns);
653 		if (group) {
654 			pci_dev_put(tmp);
655 			return group;
656 		}
657 	}
658 
659 	return NULL;
660 }
661 
662 /*
663  * Look for aliases to or from the given device for existing groups.  The
664  * dma_alias_devfn only supports aliases on the same bus, therefore the search
665  * space is quite small (especially since we're really only looking at PCIe
666  * devices, and therefore only expect multiple slots on the root complex or
667  * downstream switch ports).  It's conceivable though that a pair of
668  * multifunction devices could have aliases between them that would cause a
669  * loop.  To prevent this, we use a bitmap to track where we've been.
670  */
671 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
672 					       unsigned long *devfns)
673 {
674 	struct pci_dev *tmp = NULL;
675 	struct iommu_group *group;
676 
677 	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
678 		return NULL;
679 
680 	group = iommu_group_get(&pdev->dev);
681 	if (group)
682 		return group;
683 
684 	for_each_pci_dev(tmp) {
685 		if (tmp == pdev || tmp->bus != pdev->bus)
686 			continue;
687 
688 		/* We alias them or they alias us */
689 		if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
690 		     pdev->dma_alias_devfn == tmp->devfn) ||
691 		    ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
692 		     tmp->dma_alias_devfn == pdev->devfn)) {
693 
694 			group = get_pci_alias_group(tmp, devfns);
695 			if (group) {
696 				pci_dev_put(tmp);
697 				return group;
698 			}
699 
700 			group = get_pci_function_alias_group(tmp, devfns);
701 			if (group) {
702 				pci_dev_put(tmp);
703 				return group;
704 			}
705 		}
706 	}
707 
708 	return NULL;
709 }
710 
711 struct group_for_pci_data {
712 	struct pci_dev *pdev;
713 	struct iommu_group *group;
714 };
715 
716 /*
717  * DMA alias iterator callback, return the last seen device.  Stop and return
718  * the IOMMU group if we find one along the way.
719  */
720 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
721 {
722 	struct group_for_pci_data *data = opaque;
723 
724 	data->pdev = pdev;
725 	data->group = iommu_group_get(&pdev->dev);
726 
727 	return data->group != NULL;
728 }
729 
730 /*
731  * Use standard PCI bus topology, isolation features, and DMA alias quirks
732  * to find or create an IOMMU group for a device.
733  */
734 static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
735 {
736 	struct group_for_pci_data data;
737 	struct pci_bus *bus;
738 	struct iommu_group *group = NULL;
739 	u64 devfns[4] = { 0 };
740 
741 	/*
742 	 * Find the upstream DMA alias for the device.  A device must not
743 	 * be aliased due to topology in order to have its own IOMMU group.
744 	 * If we find an alias along the way that already belongs to a
745 	 * group, use it.
746 	 */
747 	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
748 		return data.group;
749 
750 	pdev = data.pdev;
751 
752 	/*
753 	 * Continue upstream from the point of minimum IOMMU granularity
754 	 * due to aliases to the point where devices are protected from
755 	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
756 	 * group, use it.
757 	 */
758 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
759 		if (!bus->self)
760 			continue;
761 
762 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
763 			break;
764 
765 		pdev = bus->self;
766 
767 		group = iommu_group_get(&pdev->dev);
768 		if (group)
769 			return group;
770 	}
771 
772 	/*
773 	 * Look for existing groups on device aliases.  If we alias another
774 	 * device or another device aliases us, use the same group.
775 	 */
776 	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
777 	if (group)
778 		return group;
779 
780 	/*
781 	 * Look for existing groups on non-isolated functions on the same
782  * slot and aliases of those functions, if any.  No need to clear
783  * the search bitmap; the tested devfns are still valid.
784 	 */
785 	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
786 	if (group)
787 		return group;
788 
789 	/* No shared group found, allocate new */
790 	group = iommu_group_alloc();
791 	if (IS_ERR(group))
792 		return group; /* propagate the error to the caller */
793 
794 	/*
795 	 * Try to allocate a default domain - needs support from the
796 	 * IOMMU driver.
797 	 */
798 	group->default_domain = __iommu_domain_alloc(pdev->dev.bus,
799 						     IOMMU_DOMAIN_DMA);
800 	group->domain = group->default_domain;
801 
802 	return group;
803 }
804 
805 /**
806  * iommu_group_get_for_dev - Find or create the IOMMU group for a device
807  * @dev: target device
808  *
809  * This function is intended to be called by IOMMU drivers and extended to
810  * support common, bus-defined algorithms when determining or creating the
811  * IOMMU group for a device.  On success, the caller will hold a reference
812  * to the returned IOMMU group, which will already include the provided
813  * device.  The reference should be released with iommu_group_put().
814  */
815 struct iommu_group *iommu_group_get_for_dev(struct device *dev)
816 {
817 	struct iommu_group *group;
818 	int ret;
819 
820 	group = iommu_group_get(dev);
821 	if (group)
822 		return group;
823 
824 	if (!dev_is_pci(dev))
825 		return ERR_PTR(-EINVAL);
826 
827 	group = iommu_group_get_for_pci_dev(to_pci_dev(dev));
828 
829 	if (IS_ERR(group))
830 		return group;
831 
832 	ret = iommu_group_add_device(group, dev);
833 	if (ret) {
834 		iommu_group_put(group);
835 		return ERR_PTR(ret);
836 	}
837 
838 	return group;
839 }
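
/*
 * Illustrative sketch (assumed): an IOMMU driver's add_device callback
 * would typically use this helper so that the bus-defined grouping rules
 * above are applied consistently.
 *
 *	static int my_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get_for_dev(dev);
 *
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */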
840 
841 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
842 {
843 	return group->default_domain;
844 }
845 
846 static int add_iommu_group(struct device *dev, void *data)
847 {
848 	struct iommu_callback_data *cb = data;
849 	const struct iommu_ops *ops = cb->ops;
850 	int ret;
851 
852 	if (!ops->add_device)
853 		return 0;
854 
855 	WARN_ON(dev->iommu_group);
856 
857 	ret = ops->add_device(dev);
858 
859 	/*
860 	 * We ignore -ENODEV errors for now, as they just mean that the
861 	 * device is not translated by an IOMMU. We still care about
862 	 * other errors and fail to initialize when they happen.
863 	 */
864 	if (ret == -ENODEV)
865 		ret = 0;
866 
867 	return ret;
868 }
869 
870 static int remove_iommu_group(struct device *dev, void *data)
871 {
872 	struct iommu_callback_data *cb = data;
873 	const struct iommu_ops *ops = cb->ops;
874 
875 	if (ops->remove_device && dev->iommu_group)
876 		ops->remove_device(dev);
877 
878 	return 0;
879 }
880 
881 static int iommu_bus_notifier(struct notifier_block *nb,
882 			      unsigned long action, void *data)
883 {
884 	struct device *dev = data;
885 	const struct iommu_ops *ops = dev->bus->iommu_ops;
886 	struct iommu_group *group;
887 	unsigned long group_action = 0;
888 
889 	/*
890 	 * ADD/DEL call into iommu driver ops if provided, which may
891 	 * result in ADD/DEL notifiers to group->notifier
892 	 */
893 	if (action == BUS_NOTIFY_ADD_DEVICE) {
894 		if (ops->add_device)
895 			return ops->add_device(dev);
896 	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
897 		if (ops->remove_device && dev->iommu_group) {
898 			ops->remove_device(dev);
899 			return 0;
900 		}
901 	}
902 
903 	/*
904 	 * Remaining BUS_NOTIFYs get filtered and republished to the
905 	 * group, if anyone is listening
906 	 */
907 	group = iommu_group_get(dev);
908 	if (!group)
909 		return 0;
910 
911 	switch (action) {
912 	case BUS_NOTIFY_BIND_DRIVER:
913 		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
914 		break;
915 	case BUS_NOTIFY_BOUND_DRIVER:
916 		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
917 		break;
918 	case BUS_NOTIFY_UNBIND_DRIVER:
919 		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
920 		break;
921 	case BUS_NOTIFY_UNBOUND_DRIVER:
922 		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
923 		break;
924 	}
925 
926 	if (group_action)
927 		blocking_notifier_call_chain(&group->notifier,
928 					     group_action, dev);
929 
930 	iommu_group_put(group);
931 	return 0;
932 }
933 
934 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
935 {
936 	int err;
937 	struct notifier_block *nb;
938 	struct iommu_callback_data cb = {
939 		.ops = ops,
940 	};
941 
942 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
943 	if (!nb)
944 		return -ENOMEM;
945 
946 	nb->notifier_call = iommu_bus_notifier;
947 
948 	err = bus_register_notifier(bus, nb);
949 	if (err)
950 		goto out_free;
951 
952 	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
953 	if (err)
954 		goto out_err;
955 
957 	return 0;
958 
959 out_err:
960 	/* Clean up */
961 	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
962 	bus_unregister_notifier(bus, nb);
963 
964 out_free:
965 	kfree(nb);
966 
967 	return err;
968 }
969 
970 /**
971  * bus_set_iommu - set iommu-callbacks for the bus
972  * @bus: the bus for which to set the iommu callbacks
973  * @ops: the callbacks provided by the iommu-driver
974  *
975  * This function is called by an iommu driver to set the iommu methods
976  * used for a particular bus. Drivers for devices on that bus can use
977  * the iommu-api after these ops are registered.
978  * This special function is needed because IOMMUs are usually devices on
979  * the bus itself, so the iommu drivers are not initialized when the bus
980  * is set up. With this function the iommu-driver can set the iommu-ops
981  * afterwards.
982  */
983 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
984 {
985 	int err;
986 
987 	if (bus->iommu_ops != NULL)
988 		return -EBUSY;
989 
990 	bus->iommu_ops = ops;
991 
992 	/* Do IOMMU specific setup for this bus-type */
993 	err = iommu_bus_init(bus, ops);
994 	if (err)
995 		bus->iommu_ops = NULL;
996 
997 	return err;
998 }
999 EXPORT_SYMBOL_GPL(bus_set_iommu);
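
/*
 * Illustrative sketch (not from the original file): an IOMMU driver
 * registers its (hypothetical, fully populated) "my_iommu_ops" for a bus
 * once at init time.
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	}
 */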
1000 
1001 bool iommu_present(struct bus_type *bus)
1002 {
1003 	return bus->iommu_ops != NULL;
1004 }
1005 EXPORT_SYMBOL_GPL(iommu_present);
1006 
1007 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1008 {
1009 	if (!bus->iommu_ops || !bus->iommu_ops->capable)
1010 		return false;
1011 
1012 	return bus->iommu_ops->capable(cap);
1013 }
1014 EXPORT_SYMBOL_GPL(iommu_capable);
1015 
1016 /**
1017  * iommu_set_fault_handler() - set a fault handler for an iommu domain
1018  * @domain: iommu domain
1019  * @handler: fault handler
1020  * @token: user data, will be passed back to the fault handler
1021  *
1022  * This function should be used by IOMMU users which want to be notified
1023  * whenever an IOMMU fault happens.
1024  *
1025  * The fault handler itself should return 0 on success, and an appropriate
1026  * error code otherwise.
1027  */
1028 void iommu_set_fault_handler(struct iommu_domain *domain,
1029 					iommu_fault_handler_t handler,
1030 					void *token)
1031 {
1032 	BUG_ON(!domain);
1033 
1034 	domain->handler = handler;
1035 	domain->handler_token = token;
1036 }
1037 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
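
/*
 * Illustrative sketch (assumed): a fault handler matching
 * iommu_fault_handler_t from include/linux/iommu.h.  By convention,
 * returning -ENOSYS asks the driver to fall back to its own default
 * fault reporting.
 *
 *	static int my_fault(struct iommu_domain *dom, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, NULL);
 */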
1038 
1039 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1040 						 unsigned type)
1041 {
1042 	struct iommu_domain *domain;
1043 
1044 	if (bus == NULL || bus->iommu_ops == NULL)
1045 		return NULL;
1046 
1047 	domain = bus->iommu_ops->domain_alloc(type);
1048 	if (!domain)
1049 		return NULL;
1050 
1051 	domain->ops  = bus->iommu_ops;
1052 	domain->type = type;
1053 
1054 	return domain;
1055 }
1056 
1057 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1058 {
1059 	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1060 }
1061 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1062 
1063 void iommu_domain_free(struct iommu_domain *domain)
1064 {
1065 	domain->ops->domain_free(domain);
1066 }
1067 EXPORT_SYMBOL_GPL(iommu_domain_free);
1068 
1069 static int __iommu_attach_device(struct iommu_domain *domain,
1070 				 struct device *dev)
1071 {
1072 	int ret;

1073 	if (unlikely(domain->ops->attach_dev == NULL))
1074 		return -ENODEV;
1075 
1076 	ret = domain->ops->attach_dev(domain, dev);
1077 	if (!ret)
1078 		trace_attach_device_to_domain(dev);
1079 	return ret;
1080 }
1081 
1082 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1083 {
1084 	struct iommu_group *group;
1085 	int ret;
1086 
1087 	group = iommu_group_get(dev);
1088 	/* FIXME: Remove this when groups are mandatory for iommu drivers */
1089 	if (group == NULL)
1090 		return __iommu_attach_device(domain, dev);
1091 
1092 	/*
1093 	 * We have a group - lock it to make sure the device-count doesn't
1094 	 * change while we are attaching
1095 	 */
1096 	mutex_lock(&group->mutex);
1097 	ret = -EINVAL;
1098 	if (iommu_group_device_count(group) != 1)
1099 		goto out_unlock;
1100 
1101 	ret = __iommu_attach_group(domain, group);
1102 
1103 out_unlock:
1104 	mutex_unlock(&group->mutex);
1105 	iommu_group_put(group);
1106 
1107 	return ret;
1108 }
1109 EXPORT_SYMBOL_GPL(iommu_attach_device);
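
/*
 * Illustrative sketch (not part of the original source): the classic
 * IOMMU-API sequence of allocating an unmanaged domain and attaching a
 * device to it; "dev" and "ret" are assumed to exist in the caller.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!dom)
 *		return -ENODEV;
 *	ret = iommu_attach_device(dom, dev);
 *	if (ret) {
 *		iommu_domain_free(dom);
 *		return ret;
 *	}
 */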
1110 
1111 static void __iommu_detach_device(struct iommu_domain *domain,
1112 				  struct device *dev)
1113 {
1114 	if (unlikely(domain->ops->detach_dev == NULL))
1115 		return;
1116 
1117 	domain->ops->detach_dev(domain, dev);
1118 	trace_detach_device_from_domain(dev);
1119 }
1120 
1121 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1122 {
1123 	struct iommu_group *group;
1124 
1125 	group = iommu_group_get(dev);
1126 	/* FIXME: Remove this when groups are mandatory for iommu drivers */
1127 	if (group == NULL)
1128 		return __iommu_detach_device(domain, dev);
1129 
1130 	mutex_lock(&group->mutex);
1131 	if (iommu_group_device_count(group) != 1) {
1132 		WARN_ON(1);
1133 		goto out_unlock;
1134 	}
1135 
1136 	__iommu_detach_group(domain, group);
1137 
1138 out_unlock:
1139 	mutex_unlock(&group->mutex);
1140 	iommu_group_put(group);
1141 }
1142 EXPORT_SYMBOL_GPL(iommu_detach_device);
1143 
1144 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1145 {
1146 	struct iommu_domain *domain;
1147 	struct iommu_group *group;
1148 
1149 	group = iommu_group_get(dev);
1150 	/* FIXME: Remove this when groups are mandatory for iommu drivers */
1151 	if (group == NULL)
1152 		return NULL;
1153 
1154 	domain = group->domain;
1155 
1156 	iommu_group_put(group);
1157 
1158 	return domain;
1159 }
1160 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1161 
1162 /*
1163  * IOMMU groups are really the natural working unit of the IOMMU, but
1164  * the IOMMU API works on domains and devices.  Bridge that gap by
1165  * iterating over the devices in a group.  Ideally we'd have a single
1166  * device which represents the requester ID of the group, but we also
1167  * allow IOMMU drivers to create policy-defined minimum sets, where
1168  * the physical hardware may be able to distinguish members, but we
1169  * wish to group them at a higher level (e.g. untrusted multi-function
1170  * PCI devices).  Thus we attach each device.
1171  */
1172 static int iommu_group_do_attach_device(struct device *dev, void *data)
1173 {
1174 	struct iommu_domain *domain = data;
1175 
1176 	return __iommu_attach_device(domain, dev);
1177 }
1178 
1179 static int __iommu_attach_group(struct iommu_domain *domain,
1180 				struct iommu_group *group)
1181 {
1182 	int ret;
1183 
1184 	if (group->default_domain && group->domain != group->default_domain)
1185 		return -EBUSY;
1186 
1187 	ret = __iommu_group_for_each_dev(group, domain,
1188 					 iommu_group_do_attach_device);
1189 	if (ret == 0)
1190 		group->domain = domain;
1191 
1192 	return ret;
1193 }
1194 
1195 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1196 {
1197 	int ret;
1198 
1199 	mutex_lock(&group->mutex);
1200 	ret = __iommu_attach_group(domain, group);
1201 	mutex_unlock(&group->mutex);
1202 
1203 	return ret;
1204 }
1205 EXPORT_SYMBOL_GPL(iommu_attach_group);
1206 
1207 static int iommu_group_do_detach_device(struct device *dev, void *data)
1208 {
1209 	struct iommu_domain *domain = data;
1210 
1211 	__iommu_detach_device(domain, dev);
1212 
1213 	return 0;
1214 }
1215 
1216 static void __iommu_detach_group(struct iommu_domain *domain,
1217 				 struct iommu_group *group)
1218 {
1219 	int ret;
1220 
1221 	if (!group->default_domain) {
1222 		__iommu_group_for_each_dev(group, domain,
1223 					   iommu_group_do_detach_device);
1224 		group->domain = NULL;
1225 		return;
1226 	}
1227 
1228 	if (group->domain == group->default_domain)
1229 		return;
1230 
1231 	/* Detach by re-attaching to the default domain */
1232 	ret = __iommu_group_for_each_dev(group, group->default_domain,
1233 					 iommu_group_do_attach_device);
1234 	if (ret != 0)
1235 		WARN_ON(1);
1236 	else
1237 		group->domain = group->default_domain;
1238 }
1239 
1240 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1241 {
1242 	mutex_lock(&group->mutex);
1243 	__iommu_detach_group(domain, group);
1244 	mutex_unlock(&group->mutex);
1245 }
1246 EXPORT_SYMBOL_GPL(iommu_detach_group);
1247 
1248 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1249 {
1250 	if (unlikely(domain->ops->iova_to_phys == NULL))
1251 		return 0;
1252 
1253 	return domain->ops->iova_to_phys(domain, iova);
1254 }
1255 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1256 
1257 static size_t iommu_pgsize(struct iommu_domain *domain,
1258 			   unsigned long addr_merge, size_t size)
1259 {
1260 	unsigned int pgsize_idx;
1261 	size_t pgsize;
1262 
1263 	/* Max page size that still fits into 'size' */
1264 	pgsize_idx = __fls(size);
1265 
1266 	/* need to consider alignment requirements? */
1267 	if (likely(addr_merge)) {
1268 		/* Max page size allowed by address */
1269 		unsigned int align_pgsize_idx = __ffs(addr_merge);

1270 		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1271 	}
1272 
1273 	/* build a mask of acceptable page sizes */
1274 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
1275 
1276 	/* throw away page sizes not supported by the hardware */
1277 	pgsize &= domain->ops->pgsize_bitmap;
1278 
1279 	/* make sure we're still sane */
1280 	BUG_ON(!pgsize);
1281 
1282 	/* pick the biggest page */
1283 	pgsize_idx = __fls(pgsize);
1284 	pgsize = 1UL << pgsize_idx;
1285 
1286 	return pgsize;
1287 }
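
/*
 * Worked example (illustrative): with pgsize_bitmap = SZ_4K | SZ_2M, a
 * request with iova | paddr == 0x200000 and size == 0x400000 gives an
 * alignment index of 21 and a size index of 22, so the mask keeps both
 * supported page sizes and __fls() picks the 2MB page; the loop in
 * iommu_map() below then needs two iterations to cover the 4MB.
 */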
1288 
1289 int iommu_map(struct iommu_domain *domain, unsigned long iova,
1290 	      phys_addr_t paddr, size_t size, int prot)
1291 {
1292 	unsigned long orig_iova = iova;
1293 	unsigned int min_pagesz;
1294 	size_t orig_size = size;
1295 	int ret = 0;
1296 
1297 	if (unlikely(domain->ops->map == NULL ||
1298 		     domain->ops->pgsize_bitmap == 0UL))
1299 		return -ENODEV;
1300 
1301 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1302 		return -EINVAL;
1303 
1304 	/* find out the minimum page size supported */
1305 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
1306 
1307 	/*
1308 	 * both the virtual address and the physical one, as well as
1309 	 * the size of the mapping, must be aligned (at least) to the
1310 	 * size of the smallest page supported by the hardware
1311 	 */
1312 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1313 		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1314 		       iova, &paddr, size, min_pagesz);
1315 		return -EINVAL;
1316 	}
1317 
1318 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1319 
1320 	while (size) {
1321 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1322 
1323 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1324 			 iova, &paddr, pgsize);
1325 
1326 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
1327 		if (ret)
1328 			break;
1329 
1330 		iova += pgsize;
1331 		paddr += pgsize;
1332 		size -= pgsize;
1333 	}
1334 
1335 	/* unroll mapping in case something went wrong */
1336 	if (ret)
1337 		iommu_unmap(domain, orig_iova, orig_size - size);
1338 	else
1339 		trace_map(orig_iova, paddr, orig_size);
1340 
1341 	return ret;
1342 }
1343 EXPORT_SYMBOL_GPL(iommu_map);
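
/*
 * Illustrative sketch (assumed values; "my_pages" is a hypothetical,
 * suitably aligned allocation): map one 2MB region at a fixed IOVA and
 * tear it down again.  iommu_map() transparently splits the request into
 * the page sizes advertised in ops->pgsize_bitmap.
 *
 *	ret = iommu_map(domain, 0x200000, page_to_phys(my_pages),
 *			SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unmap(domain, 0x200000, SZ_2M);
 */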
1344 
1345 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1346 {
1347 	size_t unmapped_page, unmapped = 0;
1348 	unsigned int min_pagesz;
1349 	unsigned long orig_iova = iova;
1350 
1351 	if (unlikely(domain->ops->unmap == NULL ||
1352 		     domain->ops->pgsize_bitmap == 0UL))
1353 		return -ENODEV;
1354 
1355 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1356 		return -EINVAL;
1357 
1358 	/* find out the minimum page size supported */
1359 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
1360 
1361 	/*
1362 	 * The virtual address, as well as the size of the mapping, must be
1363 	 * aligned (at least) to the size of the smallest page supported
1364 	 * by the hardware
1365 	 */
1366 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
1367 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1368 		       iova, size, min_pagesz);
1369 		return -EINVAL;
1370 	}
1371 
1372 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1373 
1374 	/*
1375 	 * Keep iterating until we either unmap 'size' bytes (or more)
1376 	 * or we hit an area that isn't mapped.
1377 	 */
1378 	while (unmapped < size) {
1379 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1380 
1381 		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
1382 		if (!unmapped_page)
1383 			break;
1384 
1385 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
1386 			 iova, unmapped_page);
1387 
1388 		iova += unmapped_page;
1389 		unmapped += unmapped_page;
1390 	}
1391 
1392 	trace_unmap(orig_iova, size, unmapped);
1393 	return unmapped;
1394 }
1395 EXPORT_SYMBOL_GPL(iommu_unmap);
1396 
1397 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1398 			 struct scatterlist *sg, unsigned int nents, int prot)
1399 {
1400 	struct scatterlist *s;
1401 	size_t mapped = 0;
1402 	unsigned int i, min_pagesz;
1403 	int ret;
1404 
1405 	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
1406 		return 0;
1407 
1408 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
1409 
1410 	for_each_sg(sg, s, nents, i) {
1411 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
1412 
1413 		/*
1414 		 * We are mapping on IOMMU page boundaries, so offset within
1415 		 * the page must be 0. However, the IOMMU may support pages
1416 		 * smaller than PAGE_SIZE, so s->offset may still represent
1417 		 * an offset of that boundary within the CPU page.
1418 		 */
1419 		if (!IS_ALIGNED(s->offset, min_pagesz))
1420 			goto out_err;
1421 
1422 		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
1423 		if (ret)
1424 			goto out_err;
1425 
1426 		mapped += s->length;
1427 	}
1428 
1429 	return mapped;
1430 
1431 out_err:
1432 	/* undo mappings already done */
1433 	iommu_unmap(domain, iova, mapped);
1434 
1435 	return 0;
1437 }
1438 EXPORT_SYMBOL_GPL(default_iommu_map_sg);
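
/*
 * Illustrative sketch (assumed; "sgt" is a hypothetical sg_table):
 * mapping a scatter-gather list contiguously in IOVA space via the
 * iommu_map_sg() wrapper.  The helper returns the number of bytes
 * mapped, or 0 after undoing any partial mapping on failure.
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -EINVAL;
 */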
1439 
1440 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
1441 			       phys_addr_t paddr, u64 size, int prot)
1442 {
1443 	if (unlikely(domain->ops->domain_window_enable == NULL))
1444 		return -ENODEV;
1445 
1446 	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
1447 						 prot);
1448 }
1449 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
1450 
1451 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
1452 {
1453 	if (unlikely(domain->ops->domain_window_disable == NULL))
1454 		return;
1455 
1456 	return domain->ops->domain_window_disable(domain, wnd_nr);
1457 }
1458 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
1459 
1460 static int __init iommu_init(void)
1461 {
1462 	iommu_group_kset = kset_create_and_add("iommu_groups",
1463 					       NULL, kernel_kobj);
1464 	ida_init(&iommu_group_ida);
1465 	mutex_init(&iommu_group_mutex);
1466 
1467 	BUG_ON(!iommu_group_kset);
1468 
1469 	return 0;
1470 }
1471 core_initcall(iommu_init);
1472 
1473 int iommu_domain_get_attr(struct iommu_domain *domain,
1474 			  enum iommu_attr attr, void *data)
1475 {
1476 	struct iommu_domain_geometry *geometry;
1477 	bool *paging;
1478 	int ret = 0;
1479 	u32 *count;
1480 
1481 	switch (attr) {
1482 	case DOMAIN_ATTR_GEOMETRY:
1483 		geometry  = data;
1484 		*geometry = domain->geometry;
1485 
1486 		break;
1487 	case DOMAIN_ATTR_PAGING:
1488 		paging  = data;
1489 		*paging = (domain->ops->pgsize_bitmap != 0UL);
1490 		break;
1491 	case DOMAIN_ATTR_WINDOWS:
1492 		count = data;
1493 
1494 		if (domain->ops->domain_get_windows != NULL)
1495 			*count = domain->ops->domain_get_windows(domain);
1496 		else
1497 			ret = -ENODEV;
1498 
1499 		break;
1500 	default:
1501 		if (!domain->ops->domain_get_attr)
1502 			return -EINVAL;
1503 
1504 		ret = domain->ops->domain_get_attr(domain, attr, data);
1505 	}
1506 
1507 	return ret;
1508 }
1509 EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
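
/*
 * Illustrative sketch (assumed): querying the addressable IOVA range of
 * a domain via DOMAIN_ATTR_GEOMETRY.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("iova range: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */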
1510 
1511 int iommu_domain_set_attr(struct iommu_domain *domain,
1512 			  enum iommu_attr attr, void *data)
1513 {
1514 	int ret = 0;
1515 	u32 *count;
1516 
1517 	switch (attr) {
1518 	case DOMAIN_ATTR_WINDOWS:
1519 		count = data;
1520 
1521 		if (domain->ops->domain_set_windows != NULL)
1522 			ret = domain->ops->domain_set_windows(domain, *count);
1523 		else
1524 			ret = -ENODEV;
1525 
1526 		break;
1527 	default:
1528 		if (domain->ops->domain_set_attr == NULL)
1529 			return -EINVAL;
1530 
1531 		ret = domain->ops->domain_set_attr(domain, attr, data);
1532 	}
1533 
1534 	return ret;
1535 }
1536 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
1537 
1538 void iommu_get_dm_regions(struct device *dev, struct list_head *list)
1539 {
1540 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1541 
1542 	if (ops && ops->get_dm_regions)
1543 		ops->get_dm_regions(dev, list);
1544 }
1545 
1546 void iommu_put_dm_regions(struct device *dev, struct list_head *list)
1547 {
1548 	const struct iommu_ops *ops = dev->bus->iommu_ops;
1549 
1550 	if (ops && ops->put_dm_regions)
1551 		ops->put_dm_regions(dev, list);
1552 }
1553 
1554 /* Request that a device is direct mapped by the IOMMU */
1555 int iommu_request_dm_for_dev(struct device *dev)
1556 {
1557 	struct iommu_domain *dm_domain;
1558 	struct iommu_group *group;
1559 	int ret;
1560 
1561 	/* Device must already be in a group before calling this function */
1562 	group = iommu_group_get_for_dev(dev);
1563 	if (IS_ERR(group))
1564 		return PTR_ERR(group);
1565 
1566 	mutex_lock(&group->mutex);
1567 
1568 	/* Check if the default domain is already direct mapped */
1569 	ret = 0;
1570 	if (group->default_domain &&
1571 	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
1572 		goto out;
1573 
1574 	/* Don't change mappings of existing devices */
1575 	ret = -EBUSY;
1576 	if (iommu_group_device_count(group) != 1)
1577 		goto out;
1578 
1579 	/* Allocate a direct mapped domain */
1580 	ret = -ENOMEM;
1581 	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
1582 	if (!dm_domain)
1583 		goto out;
1584 
1585 	/* Attach the device to the domain */
1586 	ret = __iommu_attach_group(dm_domain, group);
1587 	if (ret) {
1588 		iommu_domain_free(dm_domain);
1589 		goto out;
1590 	}
1591 
1592 	/* Make the direct mapped domain the default for this group */
1593 	if (group->default_domain)
1594 		iommu_domain_free(group->default_domain);
1595 	group->default_domain = dm_domain;
1596 
1597 	pr_info("Using direct mapping for device %s\n", dev_name(dev));
1598 
1599 	ret = 0;
1600 out:
1601 	mutex_unlock(&group->mutex);
1602 	iommu_group_put(group);
1603 
1604 	return ret;
1605 }
1606