// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/mc.h>

#include "arm-smmu.h"

/*
 * Tegra194 has three ARM MMU-500 instances.
 * Two of them are used together and must be programmed identically for
 * interleaved IOVA accesses across them; they translate accesses from
 * non-isochronous HW devices.
 * The third one is used for translating accesses from isochronous HW
 * devices.
 *
 * In addition, the SMMU driver needs to coordinate with the memory controller
 * driver to ensure that the right SID override is programmed for any given
 * memory client. This is necessary to allow for use-cases such as seamlessly
 * handing over the display controller configuration from the firmware to the
 * kernel.
 *
 * This implementation supports programming of the two instances that must
 * be programmed identically and takes care of invoking the memory controller
 * driver for SID override programming after devices have been attached to an
 * SMMU instance.
 */
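
/*
 * The additional instances are discovered through extra memory resources on
 * the SMMU node. An illustrative (not authoritative) device tree fragment,
 * with placeholder addresses, might look like:
 *
 *	iommu@12000000 {
 *		compatible = "nvidia,tegra194-smmu";
 *		reg = <0x12000000 0x800000>,
 *		      <0x11000000 0x800000>;
 *		...
 *	};
 *
 * Instance 0 is mapped by the core arm-smmu driver; any remaining entries
 * are mapped in nvidia_smmu_impl_init() below.
 */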
#define MAX_SMMU_INSTANCES 2

struct nvidia_smmu {
	struct arm_smmu_device smmu;
	void __iomem *bases[MAX_SMMU_INSTANCES];
	unsigned int num_instances;
	struct tegra_mc *mc;
};

static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct nvidia_smmu, smmu);
}

static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
					     unsigned int inst, int page)
{
	struct nvidia_smmu *nvidia_smmu;

	nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
	return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
}

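/*
 * The paired instances are kept identically programmed, so reads only need
 * to access instance 0.
 */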
static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
				int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readl_relaxed(reg);
}

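/* Writes are mirrored to every instance so they stay identically programmed. */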
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
				  int page, int offset, u32 val)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	unsigned int i;

	for (i = 0; i < nvidia->num_instances; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writel_relaxed(val, reg);
	}
}

static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
				  int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readq_relaxed(reg);
}

static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
				    int page, int offset, u64 val)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	unsigned int i;

	for (i = 0; i < nvidia->num_instances; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writeq_relaxed(val, reg);
	}
}

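/*
 * Issue the TLB sync through the mirrored register write, then poll the
 * status register of every instance until all of them report completion,
 * doubling the delay between polling rounds until the timeout is reached.
 */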
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				 int sync, int status)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	unsigned int delay;

	arm_smmu_writel(smmu, page, sync, 0);

	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		unsigned int spin_cnt;

		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			u32 val = 0;
			unsigned int i;

			for (i = 0; i < nvidia->num_instances; i++) {
				void __iomem *reg;

				reg = nvidia_smmu_page(smmu, i, page) + status;
				val |= readl_relaxed(reg);
			}

			if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;

			cpu_relax();
		}

		udelay(delay);
	}

	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	unsigned int i;

	for (i = 0; i < nvidia->num_instances; i++) {
		u32 val;
		void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
				    ARM_SMMU_GR0_sGFSR;

		/* clear global FSR */
		val = readl_relaxed(reg);
		writel_relaxed(val, reg);
	}

	return 0;
}

static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
						 struct arm_smmu_device *smmu,
						 int inst)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	if (!gfsr)
		return IRQ_NONE;

	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
			    "Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
			    "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			    gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

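/*
 * The global fault interrupt line is shared between the instances, so check
 * each instance and report the IRQ handled if any of them raised a fault.
 */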
static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu = dev;
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);

	for (inst = 0; inst < nvidia->num_instances; inst++) {
		irqreturn_t irq_ret;

		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
		if (irq_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
						  struct arm_smmu_device *smmu,
						  int idx, int inst)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
	void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);

	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

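/*
 * The context fault interrupt line is shared, so every context bank on every
 * instance is checked for a pending fault.
 */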
static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
	int idx;
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain;
	struct nvidia_smmu *nvidia;

	smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
	smmu = smmu_domain->smmu;
	nvidia = to_nvidia_smmu(smmu);

	for (inst = 0; inst < nvidia->num_instances; inst++) {
		irqreturn_t irq_ret;

		/*
		 * Interrupt line is shared between all contexts.
		 * Check for faults across all contexts.
		 */
		for (idx = 0; idx < smmu->num_context_banks; idx++) {
			irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
								 idx, inst);
			if (irq_ret == IRQ_HANDLED)
				ret = IRQ_HANDLED;
		}
	}

	return ret;
}

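/*
 * Once a device has been attached, hand it to the memory controller driver
 * so that the right SID override is programmed for its memory clients.
 */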
static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	int err;

	err = tegra_mc_probe_device(nvidia->mc, dev);
	if (err < 0)
		dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
			dev_name(dev), err);
}

static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
				    struct io_pgtable_cfg *pgtbl_cfg,
				    struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
	 * entries to not be invalidated correctly. The problem is that the
	 * walk cache index generated for an IOVA is not the same across
	 * translation and invalidation requests. This leads to page faults
	 * when a PMD entry is released during unmap and populated with a new
	 * PTE table during a subsequent map request. Disabling large page
	 * mappings avoids the release of PMD entries and keeps translations
	 * from seeing stale PMD entries in the walk cache.
	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
	 * Tegra234.
	 */
	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
		smmu->pgsize_bitmap = PAGE_SIZE;
		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
	}

	return 0;
}

static const struct arm_smmu_impl nvidia_smmu_impl = {
	.read_reg = nvidia_smmu_read_reg,
	.write_reg = nvidia_smmu_write_reg,
	.read_reg64 = nvidia_smmu_read_reg64,
	.write_reg64 = nvidia_smmu_write_reg64,
	.reset = nvidia_smmu_reset,
	.tlb_sync = nvidia_smmu_tlb_sync,
	.global_fault = nvidia_smmu_global_fault,
	.context_fault = nvidia_smmu_context_fault,
	.probe_finalize = nvidia_smmu_probe_finalize,
	.init_context = nvidia_smmu_init_context,
};

static const struct arm_smmu_impl nvidia_smmu_single_impl = {
	.probe_finalize = nvidia_smmu_probe_finalize,
	.init_context = nvidia_smmu_init_context,
};

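/*
 * Reallocate the arm_smmu_device into an nvidia_smmu wrapper, look up the
 * memory controller, map any additional instances found in the platform
 * resources and select the single- or multi-instance implementation
 * accordingly.
 */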
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct resource *res;
	struct device *dev = smmu->dev;
	struct nvidia_smmu *nvidia_smmu;
	struct platform_device *pdev = to_platform_device(dev);
	unsigned int i;

	nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
	if (!nvidia_smmu)
		return ERR_PTR(-ENOMEM);

	nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
	if (IS_ERR(nvidia_smmu->mc))
		return ERR_CAST(nvidia_smmu->mc);

	/* Instance 0 is ioremapped by arm-smmu.c. */
	nvidia_smmu->bases[0] = smmu->base;
	nvidia_smmu->num_instances++;

	for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;

		nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
		if (IS_ERR(nvidia_smmu->bases[i]))
			return ERR_CAST(nvidia_smmu->bases[i]);

		nvidia_smmu->num_instances++;
	}

	if (nvidia_smmu->num_instances == 1)
		nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
	else
		nvidia_smmu->smmu.impl = &nvidia_smmu_impl;

	return &nvidia_smmu->smmu;
}