// SPDX-License-Identifier: GPL-2.0-only
/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>

#define NO_IOMMU	1

static int of_iommu_xlate(struct device *dev,
			  struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int ret;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    !of_device_is_available(iommu_spec->np))
		return NO_IOMMU;

	ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
	if (ret)
		return ret;
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return driver_deferred_probe_check_state(dev);

	if (!try_module_get(ops->owner))
		return -ENODEV;

	ret = ops->of_xlate(dev, iommu_spec);
	module_put(ops->owner);
	return ret;
}
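
/*
 * For illustration only: of_iommu_xlate() consumes one parsed "iommus"
 * (or "iommu-map") entry at a time. A minimal devicetree sketch that
 * produces such an entry might look as follows; the node names, the
 * "vendor,some-smmu" compatible and the stream ID value are made up,
 * and the single specifier cell is interpreted by the IOMMU driver's
 * ->of_xlate() callback:
 *
 *	smmu: iommu@d0000000 {
 *		compatible = "vendor,some-smmu";
 *		reg = <0xd0000000 0x10000>;
 *		#iommu-cells = <1>;
 *	};
 *
 *	mmc@e0000000 {
 *		...
 *		iommus = <&smmu 0x100>;
 *	};
 *
 * See Documentation/devicetree/bindings/iommu/iommu.txt for the
 * authoritative binding.
 */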

static int of_iommu_configure_dev_id(struct device_node *master_np,
				     struct device *dev,
				     const u32 *id)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_id(master_np, *id, "iommu-map",
			"iommu-map-mask", &iommu_spec.np,
			iommu_spec.args);
	if (err)
		return err == -ENODEV ? NO_IOMMU : err;

	err = of_iommu_xlate(dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}

static int of_iommu_configure_dev(struct device_node *master_np,
				  struct device *dev)
{
	struct of_phandle_args iommu_spec;
	int err = NO_IOMMU, idx = 0;

	while (!of_parse_phandle_with_args(master_np, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {
		err = of_iommu_xlate(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		idx++;
		if (err)
			break;
	}

	return err;
}

struct of_pci_iommu_alias_info {
	struct device *dev;
	struct device_node *np;
};

static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_pci_iommu_alias_info *info = data;
	u32 input_id = alias;

	return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
}

static int of_iommu_configure_device(struct device_node *master_np,
				     struct device *dev, const u32 *id)
{
	return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
		      of_iommu_configure_dev(master_np, dev);
}
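
/*
 * For illustration only: for PCI and other ID-based masters, the
 * translation is usually described on the bus node with an "iommu-map"
 * (optionally plus "iommu-map-mask") property rather than per-device
 * "iommus" entries; of_map_id() above walks that table. A rough sketch
 * with made-up values, mapping every requester ID 0x0-0xffff to the
 * equal stream ID on &smmu:
 *
 *	pcie@40000000 {
 *		...
 *		iommu-map = <0x0 &smmu 0x0 0x10000>;
 *	};
 *
 * See Documentation/devicetree/bindings/pci/pci-iommu.txt for the
 * authoritative binding.
 */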

const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np,
					   const u32 *id)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int err = NO_IOMMU;

	if (!master_np)
		return NULL;

	if (fwspec) {
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	if (dev_is_pci(dev)) {
		struct of_pci_iommu_alias_info info = {
			.dev = dev,
			.np = master_np,
		};

		pci_request_acs();
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     of_pci_iommu_init, &info);
	} else {
		err = of_iommu_configure_device(master_np, dev, id);
	}

	/*
	 * Two success conditions can be represented by non-negative err here:
	 * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
	 *  0 : we found an IOMMU, and dev->fwspec is initialised appropriately
	 * <0 : any actual error
	 */
	if (!err) {
		/* The fwspec pointer changed, read it again */
		fwspec = dev_iommu_fwspec_get(dev);
		ops    = fwspec->ops;
	}
	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * probe for dev, replay it to get things in order.
	 */
	if (!err && dev->bus)
		err = iommu_probe_device(dev);

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err < 0) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}
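
/*
 * For illustration only: a rough sketch of how a caller is expected to
 * consume the return value of of_iommu_configure(). The in-tree caller
 * is of_dma_configure_id() in drivers/of/device.c; this is not a
 * verbatim copy of it:
 *
 *	const struct iommu_ops *iommu;
 *
 *	iommu = of_iommu_configure(dev, np, id);
 *	if (PTR_ERR(iommu) == -EPROBE_DEFER)
 *		return -EPROBE_DEFER;	// IOMMU not probed yet, retry later
 *	if (iommu)
 *		arch_setup_dma_ops(...);	// route DMA through the IOMMU
 *	// a NULL return means no usable IOMMU: fall back to direct DMA
 */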

static enum iommu_resv_type __maybe_unused
iommu_resv_region_get_type(struct device *dev,
			   struct resource *phys,
			   phys_addr_t start, size_t length)
{
	phys_addr_t end = start + length - 1;

	/*
	 * IOMMU regions without an associated physical region cannot be
	 * mapped and are simply reservations.
	 */
	if (phys->start >= phys->end)
		return IOMMU_RESV_RESERVED;

	/* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
	if (start == phys->start && end == phys->end)
		return IOMMU_RESV_DIRECT;

	dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
		 &start, &end);
	return IOMMU_RESV_RESERVED;
}
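
/*
 * For illustration only: a reserved-memory region as consumed by
 * of_iommu_get_resv_regions() below. Node names, labels and addresses
 * are made up; see the reserved-memory binding for the authoritative
 * format. A region with both "reg" and a matching "iommu-addresses"
 * entry becomes IOMMU_RESV_DIRECT, one with only "iommu-addresses"
 * becomes IOMMU_RESV_RESERVED:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		fb: framebuffer@80000000 {
 *			reg = <0x80000000 0x800000>;
 *			iommu-addresses = <&dc 0x80000000 0x800000>;
 *		};
 *	};
 *
 *	dc: display@54200000 {
 *		...
 *		memory-region = <&fb>;
 *	};
 */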

/**
 * of_iommu_get_resv_regions - reserved region driver helper for device tree
 * @dev: device for which to get reserved regions
 * @list: reserved region list
 *
 * IOMMU drivers can use this to implement their .get_resv_regions() callback
 * for memory regions attached to a device tree node. See the reserved-memory
 * device tree bindings on how to use these:
 *
 * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
#if IS_ENABLED(CONFIG_OF_ADDRESS)
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
		const __be32 *maps, *end;
		struct resource phys;
		int size;

		memset(&phys, 0, sizeof(phys));

		/*
		 * The "reg" property is optional and can be omitted by
		 * reserved-memory regions that represent reservations in the
		 * IOVA space, which are regions that should not be mapped.
		 */
		if (of_find_property(it.node, "reg", NULL)) {
			err = of_address_to_resource(it.node, 0, &phys);
			if (err < 0) {
				dev_err(dev, "failed to parse memory region %pOF: %d\n",
					it.node, err);
				continue;
			}
		}

		maps = of_get_property(it.node, "iommu-addresses", &size);
		if (!maps)
			continue;

		end = maps + size / sizeof(__be32);

		while (maps < end) {
			struct device_node *np;
			u32 phandle;

			phandle = be32_to_cpup(maps++);
			np = of_find_node_by_phandle(phandle);

			if (np == dev->of_node) {
				int prot = IOMMU_READ | IOMMU_WRITE;
				struct iommu_resv_region *region;
				enum iommu_resv_type type;
				phys_addr_t iova;
				size_t length;

				maps = of_translate_dma_region(np, maps, &iova, &length);
				type = iommu_resv_region_get_type(dev, &phys, iova, length);

				region = iommu_alloc_resv_region(iova, length, prot, type,
								 GFP_KERNEL);
				if (region)
					list_add_tail(&region->list, list);
			}
		}
	}
#endif
}
EXPORT_SYMBOL(of_iommu_get_resv_regions);
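
/*
 * For illustration only: an IOMMU driver would typically call the helper
 * above from its .get_resv_regions() callback, alongside other helpers
 * such as iommu_dma_get_resv_regions(). "foo" is a made-up driver prefix:
 *
 *	static void foo_get_resv_regions(struct device *dev,
 *					 struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);
 *		of_iommu_get_resv_regions(dev, head);
 *	}
 */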