/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#include "io-pgtable.h"

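/* Read a CP15 coprocessor register into reg via an inline MRC instruction */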
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n"  \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

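/*
 * Per-domain driver state: the list of IOMMUs attached to the domain, the
 * generic iommu_domain it embeds, and the ARMv7 short-descriptor io-pgtable
 * configuration and ops backing it.
 */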
struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;
	struct device		*dev;
	spinlock_t		pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

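/*
 * Enable the interface clock (pclk) and, when present, the core clock.
 * On failure nothing is left enabled.
 */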
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

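/*
 * Put the IOMMU into a known disabled state: clear the global configuration
 * registers and zero out every context bank.
 */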
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

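/*
 * io-pgtable tlb_flush_all hook: invalidate the whole TLB for every context
 * bank of every IOMMU attached to this domain.
 */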
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

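/*
 * io-pgtable tlb_add_flush hook: invalidate the given IOVA range one granule
 * at a time, tagging each TLBIVA with the ASID of the target context bank.
 */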
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_sync(void *cookie)
{
	/*
	 * Nothing is needed here; the barrier that guarantees
	 * completion of the TLB sync operation is implicitly
	 * taken care of when the IOMMU client does a writel
	 * before kick-starting the other master.
	 */
}

static const struct iommu_gather_ops msm_iommu_gather_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_add_flush = __flush_iotlb_range,
	.tlb_sync = __flush_iotlb_sync,
};

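/*
 * Atomically claim a free context bank number from the bitmap; returns
 * -ENOSPC when all banks in [start, end) are taken.
 */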
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

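/*
 * Route each of the master's MIDs (stream IDs) to its context bank and set
 * up VMID/ASID tagging for that bank.
 */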
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set VMID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

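/* Disable a single context bank and clear all of its registers */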
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

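/*
 * Program a context bank for the domain: load the page table base registers
 * and attributes from the io-pgtable configuration, invalidate the context
 * TLB, wire up fault reporting and then enable the MMU for the bank.
 */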
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

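/*
 * Allocate the ARMv7 short-descriptor io-pgtable for the domain and publish
 * the resulting page size bitmap through msm_iommu_ops.
 */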
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_gather_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static int msm_iommu_add_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	struct iommu_group *group;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void msm_iommu_remove_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

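/*
 * Attach a device's IOMMU(s) to the domain: for every IOMMU whose first
 * master matches the device's of_node, allocate a context bank per master,
 * program it with the domain's page tables and add the IOMMU to the
 * domain's attached list.
 */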
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	ret = msm_iommu_domain_config(priv);
	if (ret)
		return ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

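/*
 * map/unmap just proxy to the io-pgtable ops under the per-domain page
 * table lock; TLB maintenance is driven through msm_iommu_gather_ops.
 */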
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

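/*
 * Resolve an IOVA by asking the hardware: kick off a V2P (virtual to
 * physical) translation on the first attached context bank and read the
 * result back from the PAR register.
 */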
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
			(fsr & 0x02) ? "TF " : "",
			(fsr & 0x04) ? "AFF " : "",
			(fsr & 0x08) ? "APF " : "",
			(fsr & 0x10) ? "TLBMF " : "",
			(fsr & 0x20) ? "HTWDEEF " : "",
			(fsr & 0x40) ? "HTWSEEF " : "",
			(fsr & 0x80) ? "MHF " : "",
			(fsr & 0x10000) ? "SL " : "",
			(fsr & 0x40000000) ? "SS " : "",
			(fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

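/*
 * Record a (device, stream ID) pairing parsed from the device tree on the
 * IOMMU's master list, ignoring duplicate stream IDs.
 */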
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master)
			return;
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev->archdata.iommu = master;
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 spec->args[0]);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

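/*
 * of_xlate callback: match the phandle against a registered IOMMU and
 * record the device's stream ID on that IOMMU's master list.
 */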
static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node)
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

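/*
 * Context fault interrupt handler: dump the faulting context's registers
 * and clear the fault status so the IOMMU can make forward progress.
 */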
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %p\n", iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.add_device = msm_iommu_add_device,
	.remove_device = msm_iommu_remove_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

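/*
 * Probe: acquire clocks, MMIO and the context fault IRQ, reset the
 * hardware, then sanity-check it by running a V2P translation on context
 * bank 0 before registering with the IOMMU core.
 */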
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		dev_err(iommu->dev, "could not get iommu irq\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	/* msm_iommu_remove() looks the device up through drvdata */
	platform_set_drvdata(pdev, iommu);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name	= "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe		= msm_iommu_probe,
	.remove		= msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
	platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");