// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>

#include "dma-iommu.h"
#include "iommu-priv.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)
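
/*
 * For example, IOMMU_GROUP_ATTR(name, 0444, iommu_group_show_name, NULL)
 * declares "struct iommu_group_attribute iommu_group_attr_name", which is
 * later registered with iommu_group_create_file() (see the attribute
 * definitions further down in this file).
 */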

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				" (set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
	if (dev->iommu && dev->iommu->iommu_dev == data)
		iommu_release_device(dev);

	return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	int err = 0;

	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;
	/*
	 * Temporarily enforce global restriction to a single driver. This was
	 * already the de-facto behaviour, since any possible combination of
	 * existing drivers would compete for at least the PCI or platform bus.
	 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
void iommu_device_unregister_bus(struct iommu_device *iommu,
				 struct bus_type *bus,
				 struct notifier_block *nb)
{
	bus_unregister_notifier(bus, nb);
	iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);

/*
 * Register an iommu driver against a single bus. This is only used by iommufd
 * selftest to create a mock iommu driver. The caller must provide
 * some memory to hold a notifier_block.
 */
int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops, struct bus_type *bus,
			      struct notifier_block *nb)
{
	int err;

	iommu->ops = ops;
	nb->notifier_call = iommu_bus_notifier;
	err = bus_register_notifier(bus, nb);
	if (err)
		return err;

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	bus->iommu_ops = ops;
	err = bus_iommu_probe(bus);
	if (err) {
		iommu_device_unregister_bus(iommu, bus, nb);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
#endif

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
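
/*
 * Illustrative example: a firmware node carrying "pasid-num-bits = <5>"
 * yields max_pasids = 1 << 5 = 32, which is then clamped to whatever the
 * IOMMU instance itself reports in iommu_dev->max_pasids.
 */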

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto err_module_put;
	}

	ret = iommu_device_link(iommu_dev, dev);
	if (ret)
		goto err_release;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		group = ERR_PTR(-EINVAL);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink;
	}
	dev->iommu_group = group;

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
	return 0;

err_unlink:
	iommu_device_unlink(iommu_dev, dev);
err_release:
	if (ops->release_device)
		ops->release_device(dev);
err_module_put:
	module_put(ops->owner);
err_free:
	dev_iommu_free(dev);
	return ret;
}

static void iommu_deinit_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	lockdep_assert_held(&group->mutex);

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
	 */
	if (ops->release_device)
		ops->release_device(dev);

	/*
	 * If this is the last driver to use the group then we must free the
	 * domains before we do the module_put().
	 */
	if (list_empty(&group->devices)) {
		if (group->default_domain) {
			iommu_domain_free(group->default_domain);
			group->default_domain = NULL;
		}
		if (group->blocking_domain) {
			iommu_domain_free(group->blocking_domain);
			group->blocking_domain = NULL;
		}
		group->domain = NULL;
	}

	/* Caller must put iommu_group */
	dev->iommu_group = NULL;
	module_put(ops->owner);
	dev_iommu_free(dev);
}

DEFINE_MUTEX(iommu_probe_device_lock);

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	struct group_device *gdev;
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	lockdep_assert_held(&iommu_probe_device_lock);

	/* Device is probed already if in a group */
	if (dev->iommu_group)
		return 0;

	ret = iommu_init_device(dev, ops);
	if (ret)
		return ret;

	group = dev->iommu_group;
	gdev = iommu_group_alloc_device(group, dev);
	mutex_lock(&group->mutex);
	if (IS_ERR(gdev)) {
		ret = PTR_ERR(gdev);
		goto err_put_group;
	}

	/*
	 * The gdev must be in the list before calling
	 * iommu_setup_default_domain()
	 */
	list_add_tail(&gdev->list, &group->devices);
	WARN_ON(group->default_domain && !group->domain);
	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);
	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain && !group_list) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain) {
		/*
		 * With a group_list argument we defer the default_domain setup
		 * to the caller by providing a de-duplicated list of groups
		 * that need further setup.
		 */
		if (list_empty(&group->entry))
			list_add_tail(&group->entry, group_list);
	}
	mutex_unlock(&group->mutex);

	if (dev_is_pci(dev))
		iommu_dma_set_pci_32bit_workaround(dev);

	return 0;

err_remove_gdev:
	list_del(&gdev->list);
	__iommu_group_free_device(group, gdev);
err_put_group:
	iommu_deinit_device(dev);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, NULL);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret)
		return ret;

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or
	 * the default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	kfree(grp_dev->name);
	kfree(grp_dev);
}

/* Remove the iommu_group from the struct device. */
static void __iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev != dev)
			continue;

		list_del(&device->list);
		__iommu_group_free_device(group, device);
		if (dev->iommu && dev->iommu->iommu_dev)
			iommu_deinit_device(dev);
		else
			dev->iommu_group = NULL;
		break;
	}
	mutex_unlock(&group->mutex);

	/*
	 * Pairs with the get in iommu_init_device() or
	 * iommu_group_add_device()
	 */
	iommu_group_put(group);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		__iommu_group_remove_device(dev);

	/* Free any fwspec if no iommu_driver was ever attached */
	if (dev->iommu)
		dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
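
/*
 * Example (illustrative): booting with "iommu.passthrough=1" on the kernel
 * command line selects an identity (passthrough) default domain, while
 * "iommu.passthrough=0" forces DMA translation through the IOMMU.
 */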

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
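
/*
 * Example (illustrative): "iommu.strict=1" requests synchronous TLB
 * invalidation on unmap, while "iommu.strict=0" permits the lazy
 * flush-queue (DMA-FQ) policy where the driver supports it.
 */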

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
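
/*
 * Worked example (illustrative): inserting a "direct" region covering
 * [0x1000, 0x2fff] into a list that already holds a "direct" region
 * [0x2000, 0x3fff] first places the new element by start address and then
 * merges the overlap, leaving a single "direct" region [0x1000, 0x3fff].
 * Regions of other types are kept separate and left untouched.
 */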

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	/* Domains are freed by iommu_deinit_device() */
	WARN_ON(group->default_domain);
	WARN_ON(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
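
/*
 * Typical driver-side usage sketch (illustrative, error handling elided;
 * the group name is a made-up example):
 *
 *	struct iommu_group *grp = iommu_group_alloc();
 *
 *	if (!IS_ERR(grp)) {
 *		iommu_group_set_name(grp, "my-group");
 *		iommu_group_add_device(grp, dev);
 *		iommu_group_put(grp);	// drop the allocation reference
 *	}
 */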

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
	INIT_LIST_HEAD(&mappings);

	if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
		return -EINVAL;

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		if (entry->type == IOMMU_RESV_DIRECT)
			dev->iommu->require_direct = 1;

		if ((entry->type != IOMMU_RESV_DIRECT &&
		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
		    !iommu_is_dma_domain(domain))
			continue;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}

	if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
		iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/* This is undone by __iommu_group_free_device() */
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	gdev = iommu_group_alloc_device(group, dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
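
/*
 * Usage sketch (illustrative, with a hypothetical callback): counting the
 * devices in a group. A non-zero return from the callback stops the
 * iteration early and is propagated to the caller.
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */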

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
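
/*
 * Reference pairing sketch (illustrative): every successful
 * iommu_group_get() must be balanced by an iommu_group_put().
 *
 *	struct iommu_group *grp = iommu_group_get(dev);
 *
 *	if (grp) {
 *		// ... use the group, e.g. iommu_group_id(grp) ...
 *		iommu_group_put(grp);
 *	}
 */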

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
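
/*
 * End-to-end flow sketch (illustrative, hypothetical handler name): a
 * consumer registers a handler, the IOMMU driver reports recoverable
 * faults into it, and the consumer later completes each page request
 * group with a response.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// queue the fault for processing; once handled, respond
 *		// with iommu_page_response() using IOMMU_PAGE_RESP_SUCCESS
 *		// (retry) or IOMMU_PAGE_RESP_INVALID (terminate).
 *		return 0;
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, NULL);
 */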

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };
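	/* 4 x 64 bits gives one flag for each of the 256 possible devfns */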

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
1696 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1697 if (group)
1698 return group;
1699
1700 /* No shared group found, allocate new */
1701 return iommu_group_alloc();
1702 }
1703 EXPORT_SYMBOL_GPL(pci_device_group);
1704
1705 /* Get the IOMMU group for device on fsl-mc bus */
fsl_mc_device_group(struct device * dev)1706 struct iommu_group *fsl_mc_device_group(struct device *dev)
1707 {
1708 struct device *cont_dev = fsl_mc_cont_dev(dev);
1709 struct iommu_group *group;
1710
1711 group = iommu_group_get(cont_dev);
1712 if (!group)
1713 group = iommu_group_alloc();
1714 return group;
1715 }
1716 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1717
iommu_get_def_domain_type(struct device * dev)1718 static int iommu_get_def_domain_type(struct device *dev)
1719 {
1720 const struct iommu_ops *ops = dev_iommu_ops(dev);
1721
1722 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1723 return IOMMU_DOMAIN_DMA;
1724
1725 if (ops->def_domain_type)
1726 return ops->def_domain_type(dev);
1727
1728 return 0;
1729 }
1730
1731 static struct iommu_domain *
__iommu_group_alloc_default_domain(const struct bus_type * bus,struct iommu_group * group,int req_type)1732 __iommu_group_alloc_default_domain(const struct bus_type *bus,
1733 struct iommu_group *group, int req_type)
1734 {
1735 if (group->default_domain && group->default_domain->type == req_type)
1736 return group->default_domain;
1737 return __iommu_domain_alloc(bus, req_type);
1738 }
1739
1740 /*
1741 * req_type of 0 means "auto" which means to select a domain based on
1742 * iommu_def_domain_type or what the driver actually supports.
1743 */
1744 static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group * group,int req_type)1745 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
1746 {
1747 const struct bus_type *bus =
1748 list_first_entry(&group->devices, struct group_device, list)
1749 ->dev->bus;
1750 struct iommu_domain *dom;
1751
1752 lockdep_assert_held(&group->mutex);
1753
1754 if (req_type)
1755 return __iommu_group_alloc_default_domain(bus, group, req_type);
1756
1757 /* The driver gave no guidance on what type to use, try the default */
1758 dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
1759 if (dom)
1760 return dom;
1761
1762 /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
1763 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
1764 return NULL;
1765 dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
1766 if (!dom)
1767 return NULL;
1768
1769 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1770 iommu_def_domain_type, group->name);
1771 return dom;
1772 }
1773
iommu_group_default_domain(struct iommu_group * group)1774 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1775 {
1776 return group->default_domain;
1777 }
1778
probe_iommu_group(struct device * dev,void * data)1779 static int probe_iommu_group(struct device *dev, void *data)
1780 {
1781 struct list_head *group_list = data;
1782 int ret;
1783
1784 mutex_lock(&iommu_probe_device_lock);
1785 ret = __iommu_probe_device(dev, group_list);
1786 mutex_unlock(&iommu_probe_device_lock);
1787 if (ret == -ENODEV)
1788 ret = 0;
1789
1790 return ret;
1791 }
1792
iommu_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)1793 static int iommu_bus_notifier(struct notifier_block *nb,
1794 unsigned long action, void *data)
1795 {
1796 struct device *dev = data;
1797
1798 if (action == BUS_NOTIFY_ADD_DEVICE) {
1799 int ret;
1800
1801 ret = iommu_probe_device(dev);
1802 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1803 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1804 iommu_release_device(dev);
1805 return NOTIFY_OK;
1806 }
1807
1808 return 0;
1809 }

/* A target_type of 0 will select the best domain type and cannot fail */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	int best_type = target_type;
	struct group_device *gdev;
	struct device *last_dev;

	lockdep_assert_held(&group->mutex);

	for_each_group_device(group, gdev) {
		unsigned int type = iommu_get_def_domain_type(gdev->dev);

		if (best_type && type && best_type != type) {
			if (target_type) {
				dev_err_ratelimited(
					gdev->dev,
					"Device cannot be in %s domain\n",
					iommu_domain_type_str(target_type));
				return -1;
			}

			dev_warn(
				gdev->dev,
				"Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				iommu_domain_type_str(type), dev_name(last_dev),
				iommu_domain_type_str(best_type));
			return 0;
		}
		if (!best_type)
			best_type = type;
		last_dev = gdev->dev;
	}
	return best_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/*
		 * We go to the trouble of deferred default domain creation so
		 * that the cross-group default domain type and the setup of the
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

bool iommu_present(const struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
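
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * depends on the IOMMU enforcing cache-coherent DMA might gate its fast
 * path on this helper. "my_dev" and the surrounding probe logic are
 * hypothetical.
 *
 *	if (!device_iommu_capable(my_dev, IOMMU_CAP_CACHE_COHERENCY))
 *		return -ENODEV;	// cannot enforce coherency, bail out
 */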

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *       for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{
	struct group_device *group_dev;
	bool ret = true;

	mutex_lock(&group->mutex);
	for_each_group_device(group, group_dev)
		ret &= msi_device_has_isolated_msi(group_dev->dev);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
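
/*
 * Example (illustrative sketch, not part of this file): an unmanaged-domain
 * user logging faults. The handler matches iommu_fault_handler_t;
 * "my_fault_handler" and "my_ctx" are hypothetical names.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "IOMMU %s fault at 0x%lx\n",
 *			flags & IOMMU_FAULT_WRITE ? "write" : "read", iova);
 *		return -ENOSYS;	// elicit the driver's default behavior
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_ctx);
 */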

static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;
	unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(alloc_type);
	if (!domain)
		return NULL;

	domain->type = type;
	/*
	 * If not already set, assume all sizes by default; the driver
	 * may override this later
	 */
	if (!domain->pgsize_bitmap)
		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	__iommu_group_set_domain_nofail(group, new_domain);
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;
	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (list_count_nodes(&group->devices) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
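
/*
 * Example (illustrative sketch, not part of this file): the typical
 * lifecycle for a driver that owns the only device in its group. Error
 * handling is abbreviated and "dev" is assumed to sit behind an IOMMU.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev)) {
 *		iommu_domain_free(domain);
 *		return -ENODEV;
 *	}
 *	// ... iommu_map()/iommu_unmap() on "domain" ...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */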

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	if (dev->iommu && dev->iommu->attach_deferred)
		return __iommu_attach_device(domain, dev);

	return 0;
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (WARN_ON(domain != group->domain) ||
	    WARN_ON(list_count_nodes(&group->devices) != 1))
		goto out_unlock;
	__iommu_group_set_core_domain(group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	if (group->domain && group->domain != group->default_domain &&
	    group->domain != group->blocking_domain)
		return -EBUSY;

	return __iommu_group_set_domain(group, domain);
}

/**
 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the group. In this case attaching a different domain to the
 * group may succeed.
 */
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

/**
 * iommu_group_replace_domain - replace the domain that a group is attached to
 * @new_domain: new IOMMU domain to replace with
 * @group: IOMMU group that will be attached to the new domain
 *
 * This API allows the group to switch domains without being forced to go to
 * the blocking domain in-between.
 *
 * If the currently attached domain is a core domain (e.g. a default_domain),
 * it will act just like the iommu_attach_group().
 */
int iommu_group_replace_domain(struct iommu_group *group,
			       struct iommu_domain *new_domain)
{
	int ret;

	if (!new_domain)
		return -EINVAL;

	mutex_lock(&group->mutex);
	ret = __iommu_group_set_domain(group, new_domain);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL);

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags)
{
	int ret;

	/*
	 * If the device requires IOMMU_RESV_DIRECT then we cannot allow
	 * the blocking domain to be attached as it does not contain the
	 * required 1:1 mapping. This test effectively excludes the device
	 * being used with iommu_group_claim_dma_owner() which will block
	 * vfio and iommufd as well.
	 */
	if (dev->iommu->require_direct &&
	    (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
	     new_domain == group->blocking_domain)) {
		dev_warn(dev,
			 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
		return -EINVAL;
	}

	if (dev->iommu->attach_deferred) {
		if (new_domain == group->default_domain)
			return 0;
		dev->iommu->attach_deferred = 0;
	}

	ret = __iommu_attach_device(new_domain, dev);
	if (ret) {
		/*
		 * If we have a blocking domain then try to attach that in hopes
		 * of avoiding a UAF. Modern drivers should implement blocking
		 * domains as global statics that cannot fail.
		 */
		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
		    group->blocking_domain &&
		    group->blocking_domain != new_domain)
			__iommu_attach_device(group->blocking_domain, dev);
		return ret;
	}
	return 0;
}

/*
 * If 0 is returned the group's domain is new_domain. If an error is returned
 * then the group's domain will be set back to the existing domain, unless
 * IOMMU_SET_DOMAIN_MUST_SUCCEED was passed, in which case an error is returned
 * and the group's domain is left inconsistent. It is a driver bug to fail
 * attach with a previously good domain; we try to avoid a kernel UAF because
 * of it.
 *
 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
 * API works on domains and devices. Bridge that gap by iterating over the
 * devices in a group. Ideally we'd have a single device which represents the
 * requestor ID of the group, but we also allow IOMMU drivers to create policy
 * defined minimum sets, where the physical hardware may be able to distinguish
 * members, but we wish to group them at a higher level (ex. untrusted
 * multi-function PCI devices). Thus we attach each device.
 */
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags)
{
	struct group_device *last_gdev;
	struct group_device *gdev;
	int result;
	int ret;

	lockdep_assert_held(&group->mutex);

	if (group->domain == new_domain)
		return 0;

	/*
	 * New drivers should support default domains, so set_platform_dma()
	 * op will never be called. Otherwise the NULL domain represents some
	 * platform specific behavior.
	 */
	if (!new_domain) {
		for_each_group_device(group, gdev) {
			const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);

			if (!WARN_ON(!ops->set_platform_dma_ops))
				ops->set_platform_dma_ops(gdev->dev);
		}
		group->domain = NULL;
		return 0;
	}

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 */
	result = 0;
	for_each_group_device(group, gdev) {
		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
						flags);
		if (ret) {
			result = ret;
			/*
			 * Keep trying the other devices in the group. If a
			 * driver fails attach to an otherwise good domain, and
			 * does not support blocking domains, it should at least
			 * drop its reference on the current domain so we don't
			 * UAF.
			 */
			if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
				continue;
			goto err_revert;
		}
	}
	group->domain = new_domain;
	return result;

err_revert:
	/*
	 * This is called in error unwind paths. A well behaved driver should
	 * always allow us to attach to a domain that was already attached.
	 */
	last_gdev = gdev;
	for_each_group_device(group, gdev) {
		const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);

		/*
		 * If set_platform_dma_ops is not present a NULL domain can
		 * happen only for first probe, in which case we leave
		 * group->domain as NULL and let release clean everything up.
		 */
		if (group->domain)
			WARN_ON(__iommu_device_set_domain(
				group, gdev->dev, group->domain,
				IOMMU_SET_DOMAIN_MUST_SUCCEED));
		else if (ops->set_platform_dma_ops)
			ops->set_platform_dma_ops(gdev->dev);
		if (gdev == last_gdev)
			break;
	}
	return ret;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}
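
/*
 * Worked example (illustrative): with pgsize_bitmap = SZ_4K | SZ_2M,
 * iova = 0x201000, paddr = 0x601000 and size = SZ_4M, addr_merge is
 * 0x601000, so only the 4KiB size survives the alignment mask and
 * pgsize = SZ_4K. The next bigger size (2MiB) is compatible because
 * (iova ^ paddr) is 2MiB-aligned, and the distance to the next 2MiB
 * boundary is 0x1ff000, so *count becomes 0x1ff000 >> 12 = 511. The
 * caller thus maps 511 4KiB pages up to the boundary, then one 2MiB
 * page, then the remaining 4KiB tail on subsequent iterations.
 */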

static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);

	if (ops->map_pages) {
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, mapped);
	} else {
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
		*mapped = ret ? 0 : pgsize;
	}

	return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(ops->map || ops->map_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t mapped = 0;

		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
					&mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;

		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
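
/*
 * Example (illustrative sketch, not part of this file): map one 4KiB page
 * writable, sanity-check the translation, then tear it down. "domain",
 * "iova" and "page" are assumed to come from the caller, with iova/size
 * aligned per domain->pgsize_bitmap.
 *
 *	int ret;
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	WARN_ON(iommu_iova_to_phys(domain, iova) != page_to_phys(page));
 *	iommu_unmap(domain, iova, SZ_4K);
 */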

static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot,
		     gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (sg_dma_is_bus_address(sg))
			goto next;

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

next:
		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
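
/*
 * Example (illustrative sketch, not part of this file): mapping an
 * already-populated sg_table contiguously in IOVA space. A negative return
 * is an errno; a positive one is the number of bytes mapped, which callers
 * typically compare against the expected total. "sgt" is hypothetical.
 *
 *	ssize_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
 *				      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (mapped < 0)
 *		return mapped;
 */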

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

/**
 * iommu_get_resv_regions - get reserved regions
 * @dev: device for which to get reserved regions
 * @list: reserved region list for device
 *
 * This returns a list of reserved IOVA regions specific to this device.
 * A domain user should not map IOVA in these ranges.
 */
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}
EXPORT_SYMBOL_GPL(iommu_get_resv_regions);

/**
 * iommu_put_resv_regions - release reserved regions
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * This releases a reserved region list acquired by iommu_get_resv_regions().
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list) {
		if (entry->free)
			entry->free(dev, entry);
		else
			kfree(entry);
	}
}
EXPORT_SYMBOL(iommu_put_resv_regions);
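
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * manages IOVA space itself can walk the reserved regions to punch holes
 * in its allocator before mapping anything. "my_iova_reserve" is a
 * hypothetical stand-in for that allocator.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		my_iova_reserve(region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */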

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), gfp);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
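
/*
 * Example (illustrative sketch, not part of this file): firmware glue such
 * as the OF/ACPI parsers typically binds a device to its IOMMU instance
 * first, then records the IDs the firmware handed out. "iommu_np" and
 * "sid" stand in for data parsed from the firmware tables.
 *
 *	ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_np), ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */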

/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

/**
 * iommu_setup_default_domain - Set the default_domain for the group
 * @group: Group to change
 * @target_type: Domain type to set as the default_domain
 *
 * Allocate a default domain and set it as the current domain on the group. If
 * the group already has a default domain it will be changed to the target_type.
 * When target_type is 0 the default domain is selected based on driver and
 * system preferences.
 */
static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type)
{
	struct iommu_domain *old_dom = group->default_domain;
	struct group_device *gdev;
	struct iommu_domain *dom;
	bool direct_failed;
	int req_type;
	int ret;

	lockdep_assert_held(&group->mutex);

	req_type = iommu_get_default_domain_type(group, target_type);
	if (req_type < 0)
		return -EINVAL;

	/*
	 * There are still some drivers which don't support default domains, so
	 * we ignore the failure and leave group->default_domain NULL.
	 *
	 * We assume that the iommu driver starts up the device in
	 * 'set_platform_dma_ops' mode if it does not support default domains.
	 */
	dom = iommu_group_alloc_default_domain(group, req_type);
	if (!dom) {
		/* Once in default_domain mode we never leave */
		if (group->default_domain)
			return -ENODEV;
		group->default_domain = NULL;
		return 0;
	}

	if (group->default_domain == dom)
		return 0;

	/*
	 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
	 * mapped before their device is attached, in order to guarantee
	 * continuity with any FW activity
	 */
	direct_failed = false;
	for_each_group_device(group, gdev) {
		if (iommu_create_device_direct_mappings(dom, gdev->dev)) {
			direct_failed = true;
			dev_warn_once(
				gdev->dev->iommu->iommu_dev->dev,
				"IOMMU driver was not able to establish FW requested direct mapping.");
		}
	}

	/* We must set default_domain early for __iommu_device_set_domain */
	group->default_domain = dom;
	if (!group->domain) {
		/*
		 * Drivers are not allowed to fail the first domain attach.
		 * The only way to recover from this is to fail attaching the
		 * iommu driver and call ops->release_device. Put the domain
		 * in group->default_domain so it is freed after.
		 */
		ret = __iommu_group_set_domain_internal(
			group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
		if (WARN_ON(ret))
			goto out_free_old;
	} else {
		ret = __iommu_group_set_domain(group, dom);
		if (ret)
			goto err_restore_def_domain;
	}

	/*
	 * Drivers are supposed to allow mappings to be installed in a domain
	 * before device attachment, but some don't. Hack around this defect by
	 * trying again after attaching. If this happens it means the device
	 * will not continuously have the IOMMU_RESV_DIRECT map.
	 */
	if (direct_failed) {
		for_each_group_device(group, gdev) {
			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
			if (ret)
				goto err_restore_domain;
		}
	}

out_free_old:
	if (old_dom)
		iommu_domain_free(old_dom);
	return ret;

err_restore_domain:
	if (old_dom)
		__iommu_group_set_domain_internal(
			group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
err_restore_def_domain:
	if (old_dom) {
		iommu_domain_free(dom);
		group->default_domain = old_dom;
	}
	return ret;
}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * group->mutex is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *gdev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group) || !group->default_domain)
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	mutex_lock(&group->mutex);
	/* We can bring up a flush queue without tearing down the domain. */
	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(group->default_domain);
		if (ret)
			goto out_unlock;

		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
		ret = count;
		goto out_unlock;
	}

	/* Otherwise, ensure that device exists and no driver is bound. */
	if (list_empty(&group->devices) || group->owner_cnt) {
		ret = -EPERM;
		goto out_unlock;
	}

	ret = iommu_setup_default_domain(group, req_type);
	if (ret)
		goto out_unlock;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in-turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	for_each_group_device(group, gdev)
		iommu_group_do_probe_finalize(gdev->dev);
	return count;

out_unlock:
	mutex_unlock(&group->mutex);
	return ret ?: count;
}

static bool iommu_is_default_domain(struct iommu_group *group)
{
	if (group->domain == group->default_domain)
		return true;

	/*
	 * If the default domain was set to identity and it is still an identity
	 * domain then we consider this a pass. This happens because of
	 * amd_iommu_init_device() replacing the default identity domain with an
	 * identity domain that has a different configuration for AMDGPU.
	 */
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
		return true;
	return false;
}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 *                                     DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
 */
int iommu_device_use_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner || !iommu_is_default_domain(group) ||
		    !xa_empty(&group->pasid_array)) {
			ret = -EBUSY;
			goto unlock_out;
		}
	}

	group->owner_cnt++;

unlock_out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/**
 * iommu_device_unuse_default_domain() - Device driver stops handling device
 *                                       DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver doesn't want to do DMA through kernel DMA API anymore.
 * It must be called after iommu_device_use_default_domain().
 */
void iommu_device_unuse_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
		group->owner_cnt--;

	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}

static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
	struct group_device *dev =
		list_first_entry(&group->devices, struct group_device, list);

	if (group->blocking_domain)
		return 0;

	group->blocking_domain =
		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
	if (!group->blocking_domain) {
		/*
		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
		 * create an empty domain instead.
		 */
		group->blocking_domain = __iommu_domain_alloc(
			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
		if (!group->blocking_domain)
			return -EINVAL;
	}
	return 0;
}

static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
{
	int ret;

	if ((group->domain && group->domain != group->default_domain) ||
	    !xa_empty(&group->pasid_array))
		return -EBUSY;

	ret = __iommu_group_alloc_blocking_domain(group);
	if (ret)
		return ret;
	ret = __iommu_group_set_domain(group, group->blocking_domain);
	if (ret)
		return ret;

	group->owner = owner;
	group->owner_cnt++;
	return 0;
}

/**
 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
 * @group: The group.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * This is to support backward compatibility for vfio which manages the dma
 * ownership in iommu_group level. New invocations on this interface should be
 * prohibited. Only a single owner may exist for a group.
 */
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	int ret = 0;

	if (WARN_ON(!owner))
		return -EINVAL;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		ret = -EPERM;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);
unlock_out:
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
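
/*
 * Example (illustrative sketch, not part of this file): a VFIO-like user
 * takes exclusive ownership (parking the group on the blocking domain),
 * attaches its own unmanaged domain, and later hands the group back to the
 * core-owned default. "my_cookie" is any pointer unique to the owner.
 *
 *	ret = iommu_group_claim_dma_owner(group, my_cookie);
 *	if (ret)
 *		return ret;
 *	ret = iommu_attach_group(my_domain, group);
 *	// ... user-controlled DMA ...
 *	iommu_group_release_dma_owner(group);
 */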

/**
 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
 * @dev: The device.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * Claim the DMA ownership of a device. Multiple devices in the same group may
 * concurrently claim ownership if they present the same owner value. Returns 0
 * on success and error code on failure
 */
int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	struct iommu_group *group;
	int ret = 0;

	if (WARN_ON(!owner))
		return -EINVAL;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner != owner) {
			ret = -EPERM;
			goto unlock_out;
		}
		group->owner_cnt++;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);
unlock_out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);

static void __iommu_release_dma_ownership(struct iommu_group *group)
{
	if (WARN_ON(!group->owner_cnt || !group->owner ||
		    !xa_empty(&group->pasid_array)))
		return;

	group->owner_cnt = 0;
	group->owner = NULL;
	__iommu_group_set_domain_nofail(group, group->default_domain);
}

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_device_release_dma_owner() - Release DMA ownership of a device
 * @dev: The device.
 *
 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
 */
void iommu_device_release_dma_owner(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	if (group->owner_cnt > 1)
		group->owner_cnt--;
	else
		__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);

/**
 * iommu_group_dma_owner_claimed() - Query group dma ownership status
 * @group: The group.
 *
 * This provides status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	unsigned int user;

	mutex_lock(&group->mutex);
	user = group->owner_cnt;
	mutex_unlock(&group->mutex);

	return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid)
{
	struct group_device *device, *last_gdev;
	int ret;

	for_each_group_device(group, device) {
		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
		if (ret)
			goto err_revert;
	}

	return 0;

err_revert:
	last_gdev = device;
	for_each_group_device(group, device) {
		const struct iommu_ops *ops = dev_iommu_ops(device->dev);

		if (device == last_gdev)
			break;
		ops->remove_dev_pasid(device->dev, pasid);
	}
	return ret;
}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid)
{
	struct group_device *device;
	const struct iommu_ops *ops;

	for_each_group_device(group, device) {
		ops = dev_iommu_ops(device->dev);
		ops->remove_dev_pasid(device->dev, pasid);
	}
}

/*
 * iommu_attach_device_pasid() - Attach a domain to pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid)
{
	struct iommu_group *group;
	void *curr;
	int ret;

	if (!domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
	if (curr) {
		ret = xa_err(curr) ? : -EBUSY;
		goto out_unlock;
	}

	ret = __iommu_set_group_pasid(domain, group, pasid);
	if (ret)
		xa_erase(&group->pasid_array, pasid);
out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);

/*
 * iommu_detach_device_pasid() - Detach the domain from pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{
	struct iommu_group *group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid);
	WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
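
/*
 * Example (illustrative sketch, not part of this file): binding a domain to
 * a PASID and undoing it. The PASID would typically come from
 * iommu_alloc_global_pasid() or an SVA handle; "pasid" is assumed valid
 * for "dev".
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		return ret;
 *	// ... PASID-tagged DMA runs against "domain" ...
 *	iommu_detach_device_pasid(domain, dev, pasid);
 */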

/*
 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
 * @dev: the queried device
 * @pasid: the pasid of the device
 * @type: matched domain type, 0 for any match
 *
 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
 * domain attached to pasid of a device. Callers must hold a lock around this
 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of
 * type is being manipulated. This API does not internally resolve races with
 * attach/detach.
 *
 * Return: attached domain on success, NULL otherwise.
 */
struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
						    ioasid_t pasid,
						    unsigned int type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	xa_lock(&group->pasid_array);
	domain = xa_load(&group->pasid_array, pasid);
	if (type && domain && domain->type != type)
		domain = ERR_PTR(-EBUSY);
	xa_unlock(&group->pasid_array);
	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->iopf_handler = iommu_sva_handle_iopf;
	domain->fault_data = mm;

	return domain;
}

ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	int ret;

	/* max_pasids == 0 means that the device does not support PASID */
	if (!dev->iommu->max_pasids)
		return IOMMU_PASID_INVALID;

	/*
	 * max_pasids is set up by vendor driver based on number of PASID bits
	 * supported but the IDA allocation is inclusive.
	 */
	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
			      dev->iommu->max_pasids - 1, GFP_KERNEL);
	return ret < 0 ? IOMMU_PASID_INVALID : ret;
}
EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);

void iommu_free_global_pasid(ioasid_t pasid)
{
	if (WARN_ON(pasid == IOMMU_PASID_INVALID))
		return;

	ida_free(&iommu_global_pasid_ida, pasid);
}
EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
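
/*
 * Example (illustrative sketch, not part of this file): pairing the global
 * PASID allocator with the PASID attach path above. IOMMU_PASID_INVALID
 * signals exhaustion or a device without PASID support.
 *
 *	ioasid_t pasid = iommu_alloc_global_pasid(dev);
 *
 *	if (pasid == IOMMU_PASID_INVALID)
 *		return -ENOSPC;
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		iommu_free_global_pasid(pasid);
 */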