xref: /openbmc/linux/drivers/iommu/msm_iommu.c (revision 84b102f5)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

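/*
 * Read a coprocessor register into 'reg' via an ARM mrc instruction.
 * The coprocessor name and the opcode/register operands are pasted
 * directly into the inline-assembly string.
 */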
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;
	struct device		*dev;
	spinlock_t		pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

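/*
 * Clock handling: the AHB/configuration clock (pclk) must be running
 * before the optional core clock (clk) is enabled; if enabling the
 * core clock fails, the already-enabled pclk is rolled back so the
 * enable counts stay balanced.
 */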
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

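/*
 * Invalidate the TLB by VA, one granule at a time, for every context
 * bank attached to the domain.  TLBIVA takes the VA in its upper bits
 * and the context's ASID in the low bits, so each write only targets
 * entries tagged for that context.
 */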
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	size_t temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

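/*
 * Allocate a free context bank from the bitmap without holding a lock:
 * find a clear bit, then atomically test-and-set it, retrying if
 * another CPU claimed the same bit in between.
 */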
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

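/*
 * Route each of the master's stream IDs (MIDs) to its context bank:
 * every MID is pointed at context bank 'master->num', the VMID is
 * fixed to 0, and the context's ASID is simply its bank number.
 */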
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

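/*
 * Set up an ARMv7 short-descriptor io-pgtable for the domain with
 * 32-bit input and output address spaces.  The page-size bitmap the
 * table code reports back is propagated into msm_iommu_ops so the
 * IOMMU core only asks for sizes the table actually supports.
 */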
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached\n");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

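/*
 * Translate an IOVA with the hardware's VA-to-PA probe: write the VA
 * to V2PPR and read the result back from PAR.  Supersection results
 * keep 24 bits of page offset from the VA, ordinary results keep 12,
 * and a translation fault yields 0.
 */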
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu master\n");
			return;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 spec->args[0]);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

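/*
 * Threaded context-fault handler: scan every context bank's fault
 * status register, dump the interesting registers for any bank that
 * faulted, then clear the fault by writing the FSR back.
 */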
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here; the barrier that guarantees
	 * completion of the TLB sync operation is implicitly
	 * taken care of when the IOMMU client does a writel before
	 * kick-starting the other master.
	 */
	.iotlb_sync = NULL,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};
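
/*
 * Illustrative sketch (not part of this driver) of how a client of the
 * generic IOMMU API of this kernel era would exercise the callbacks
 * above; the IOVA, 'phys' and 'dev' names and the 1 MiB size are made
 * up, and error handling is omitted for brevity:
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(&platform_bus_type);
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, 0x10000000, phys, SZ_1M,
 *			  IOMMU_READ | IOMMU_WRITE);
 */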

static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

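	/*
	 * Sanity check that the hardware is alive: reset the block,
	 * briefly enable context 0, kick off a VA-to-PA translation of
	 * address 0 and expect a non-zero PAR back.  A PAR of zero
	 * means the translation never happened.
	 */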
	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};
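
/*
 * Illustrative device-tree sketch (the unit address, register range,
 * interrupt specifier and clock phandles are made up) showing what
 * msm_iommu_probe() above expects: one MMIO region, one interrupt,
 * clocks named "smmu_pclk" and "iommu_clk", and a "qcom,ncb"
 * context-bank count:
 *
 *	iommu@7500000 {
 *		compatible = "qcom,apq8064-iommu";
 *		reg = <0x07500000 0x100000>;
 *		interrupts = <0 63 0>;
 *		clocks = <&clk_smmu_pclk>, <&clk_iommu>;
 *		clock-names = "smmu_pclk", "iommu_clk";
 *		qcom,ncb = <2>;
 *		#iommu-cells = <1>;
 *	};
 */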

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name	= "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe		= msm_iommu_probe,
	.remove		= msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);