// SPDX-License-Identifier: GPL-2.0-only
/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>

#define NO_IOMMU	1

static int of_iommu_xlate(struct device *dev,
			  struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int ret;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    !of_device_is_available(iommu_spec->np))
		return NO_IOMMU;

	ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
	if (ret)
		return ret;
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return driver_deferred_probe_check_state(dev);

	if (!try_module_get(ops->owner))
		return -ENODEV;

	ret = ops->of_xlate(dev, iommu_spec);
	module_put(ops->owner);
	return ret;
}

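/*
 * Resolve the IOMMU for a master that addresses the bus through an ID
 * (e.g. a PCI requester ID or an fsl-mc ICID): translate @id through the
 * "iommu-map"/"iommu-map-mask" properties of @master_np and hand the
 * resulting specifier to of_iommu_xlate().
 */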
static int of_iommu_configure_dev_id(struct device_node *master_np,
				     struct device *dev,
				     const u32 *id)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_id(master_np, *id, "iommu-map",
			"iommu-map-mask", &iommu_spec.np,
			iommu_spec.args);
	if (err)
		return err == -ENODEV ? NO_IOMMU : err;

	err = of_iommu_xlate(dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}

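/*
 * Resolve the IOMMU(s) for a master described directly by an "iommus"
 * phandle list, translating each specifier in turn. Illustrative binding
 * shape only (node names and specifier values below are made up):
 *
 *	iommu: iommu@12345000 {
 *		#iommu-cells = <1>;
 *	};
 *
 *	master@a000 {
 *		iommus = <&iommu 0x42>;
 *	};
 */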
static int of_iommu_configure_dev(struct device_node *master_np,
				  struct device *dev)
{
	struct of_phandle_args iommu_spec;
	int err = NO_IOMMU, idx = 0;

	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {
		err = of_iommu_xlate(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		idx++;
		if (err)
			break;
	}

	return err;
}

struct of_pci_iommu_alias_info {
	struct device *dev;
	struct device_node *np;
};

static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_pci_iommu_alias_info *info = data;
	u32 input_id = alias;

	return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
}

static int of_iommu_configure_device(struct device_node *master_np,
				     struct device *dev, const u32 *id)
{
	return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
		      of_iommu_configure_dev(master_np, dev);
}

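/*
 * Main entry point, typically reached via of_dma_configure_id() when the bus
 * code sets up DMA for @dev. Returns the IOMMU ops on success,
 * ERR_PTR(-EPROBE_DEFER) if we must wait for the IOMMU driver, or NULL
 * otherwise (no IOMMU, unavailable IOMMU, or a non-fatal error).
 */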
of_iommu_configure(struct device * dev,struct device_node * master_np,const u32 * id)1102a0c5754SRobin Murphy const struct iommu_ops *of_iommu_configure(struct device *dev,
111a081bd4aSLorenzo Pieralisi struct device_node *master_np,
112a081bd4aSLorenzo Pieralisi const u32 *id)
1132a0c5754SRobin Murphy {
114d87beb74SRobin Murphy const struct iommu_ops *ops = NULL;
115c1114090SRobin Murphy struct iommu_fwspec *fwspec;
116da4b0275SRobin Murphy int err = NO_IOMMU;
1172a0c5754SRobin Murphy
1182a0c5754SRobin Murphy if (!master_np)
1197eba1d51SWill Deacon return NULL;
1202a0c5754SRobin Murphy
121c1114090SRobin Murphy /* Serialise to make dev->iommu stable under our potential fwspec */
122c1114090SRobin Murphy mutex_lock(&iommu_probe_device_lock);
123c1114090SRobin Murphy fwspec = dev_iommu_fwspec_get(dev);
124d7b05582SRobin Murphy if (fwspec) {
125c1114090SRobin Murphy if (fwspec->ops) {
126c1114090SRobin Murphy mutex_unlock(&iommu_probe_device_lock);
127d7b05582SRobin Murphy return fwspec->ops;
128c1114090SRobin Murphy }
129d7b05582SRobin Murphy /* In the deferred case, start again from scratch */
130d7b05582SRobin Murphy iommu_fwspec_free(dev);
131d7b05582SRobin Murphy }
132d7b05582SRobin Murphy
133d87beb74SRobin Murphy /*
134d87beb74SRobin Murphy * We don't currently walk up the tree looking for a parent IOMMU.
135d87beb74SRobin Murphy * See the `Notes:' section of
136d87beb74SRobin Murphy * Documentation/devicetree/bindings/iommu/iommu.txt
137d87beb74SRobin Murphy */
138d87beb74SRobin Murphy if (dev_is_pci(dev)) {
139d87beb74SRobin Murphy struct of_pci_iommu_alias_info info = {
140d87beb74SRobin Murphy .dev = dev,
141d87beb74SRobin Murphy .np = master_np,
142d87beb74SRobin Murphy };
143d87beb74SRobin Murphy
1446bf6c247SWill Deacon pci_request_acs();
145d87beb74SRobin Murphy err = pci_for_each_dma_alias(to_pci_dev(dev),
146d87beb74SRobin Murphy of_pci_iommu_init, &info);
147d87beb74SRobin Murphy } else {
148a081bd4aSLorenzo Pieralisi err = of_iommu_configure_device(master_np, dev, id);
14989535821SJean-Philippe Brucker }
1505c7e6bd7SJoerg Roedel
151da4b0275SRobin Murphy /*
152da4b0275SRobin Murphy * Two success conditions can be represented by non-negative err here:
153da4b0275SRobin Murphy * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
154da4b0275SRobin Murphy * 0 : we found an IOMMU, and dev->fwspec is initialised appropriately
155da4b0275SRobin Murphy * <0 : any actual error
156da4b0275SRobin Murphy */
1575c7e6bd7SJoerg Roedel if (!err) {
1585c7e6bd7SJoerg Roedel /* The fwspec pointer changed, read it again */
1595c7e6bd7SJoerg Roedel fwspec = dev_iommu_fwspec_get(dev);
1605c7e6bd7SJoerg Roedel ops = fwspec->ops;
1615c7e6bd7SJoerg Roedel }
162c1114090SRobin Murphy mutex_unlock(&iommu_probe_device_lock);
163c1114090SRobin Murphy
164d7b05582SRobin Murphy /*
165d7b05582SRobin Murphy * If we have reason to believe the IOMMU driver missed the initial
166641fb0efSJoerg Roedel * probe for dev, replay it to get things in order.
167d7b05582SRobin Murphy */
1686eb4da8cSJason Gunthorpe if (!err && dev->bus)
169641fb0efSJoerg Roedel err = iommu_probe_device(dev);
1702a0c5754SRobin Murphy
171a37b19a3SSricharan R /* Ignore all other errors apart from EPROBE_DEFER */
172da4b0275SRobin Murphy if (err == -EPROBE_DEFER) {
173da4b0275SRobin Murphy ops = ERR_PTR(err);
174da4b0275SRobin Murphy } else if (err < 0) {
175da4b0275SRobin Murphy dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
176a37b19a3SSricharan R ops = NULL;
177a37b19a3SSricharan R }
178a37b19a3SSricharan R
1797b07cbefSLaurent Pinchart return ops;
1807eba1d51SWill Deacon }
181a5bf3cfcSThierry Reding
static enum iommu_resv_type __maybe_unused
iommu_resv_region_get_type(struct device *dev,
			   struct resource *phys,
			   phys_addr_t start, size_t length)
{
	phys_addr_t end = start + length - 1;

	/*
	 * IOMMU regions without an associated physical region cannot be
	 * mapped and are simply reservations.
	 */
	if (phys->start >= phys->end)
		return IOMMU_RESV_RESERVED;

	/* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
	if (start == phys->start && end == phys->end)
		return IOMMU_RESV_DIRECT;

	dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
		 &start, &end);
	return IOMMU_RESV_RESERVED;
}

/**
 * of_iommu_get_resv_regions - reserved region driver helper for device tree
 * @dev: device for which to get reserved regions
 * @list: reserved region list
 *
 * IOMMU drivers can use this to implement their .get_resv_regions() callback
 * for memory regions attached to a device tree node. See the reserved-memory
 * device tree bindings on how to use these:
 *
 * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
#if IS_ENABLED(CONFIG_OF_ADDRESS)
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
		const __be32 *maps, *end;
		struct resource phys;
		int size;

		memset(&phys, 0, sizeof(phys));

		/*
		 * The "reg" property is optional and can be omitted by reserved-memory regions
		 * that represent reservations in the IOVA space, which are regions that should
		 * not be mapped.
		 */
		if (of_find_property(it.node, "reg", NULL)) {
			err = of_address_to_resource(it.node, 0, &phys);
			if (err < 0) {
				dev_err(dev, "failed to parse memory region %pOF: %d\n",
					it.node, err);
				continue;
			}
		}

		maps = of_get_property(it.node, "iommu-addresses", &size);
		if (!maps)
			continue;

		end = maps + size / sizeof(__be32);

		while (maps < end) {
			struct device_node *np;
			u32 phandle;

			phandle = be32_to_cpup(maps++);
			np = of_find_node_by_phandle(phandle);

			if (np == dev->of_node) {
				int prot = IOMMU_READ | IOMMU_WRITE;
				struct iommu_resv_region *region;
				enum iommu_resv_type type;
				phys_addr_t iova;
				size_t length;

				if (of_dma_is_coherent(dev->of_node))
					prot |= IOMMU_CACHE;

				maps = of_translate_dma_region(np, maps, &iova, &length);
				if (length == 0) {
					dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
					continue;
				}
				type = iommu_resv_region_get_type(dev, &phys, iova, length);

				region = iommu_alloc_resv_region(iova, length, prot, type,
								 GFP_KERNEL);
				if (region)
					list_add_tail(&region->list, list);
			}
		}
	}
#endif
}
EXPORT_SYMBOL(of_iommu_get_resv_regions);
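
/*
 * Illustrative sketch only: a hypothetical "my_iommu" driver (the names
 * below are made up, not an existing driver) could forward its
 * .get_resv_regions() callback to the helper above:
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		of_iommu_get_resv_regions(dev, head);
 *	}
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.get_resv_regions	= my_iommu_get_resv_regions,
 *	};
 */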