/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;
};

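/*
 * An address space: one instance per IOMMU domain. Each address space
 * owns one ASID, a two-level page table rooted at @pd, and a shadow
 * page (@count) tracking how many PTEs are in use per page table so
 * that empty page tables can be returned to the system.
 */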
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;	/* per page table usage counts */
	struct page *pd;	/* page directory */
	unsigned int id;	/* ASID */
	u32 attr;		/* SMMU_PD_* attributes programmed via PTB_DATA */
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x014
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x018
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
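
/*
 * With 1024 PDEs of 1024 PTEs each mapping 4 KiB pages, a page directory
 * spans 1024 * 1024 * 4 KiB = 4 GiB of IOVA space; each PDE therefore
 * covers a 4 MiB "section" (IOVA bits 31:22) and each PTE a 4 KiB page
 * (IOVA bits 21:12).
 */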

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
				 SMMU_PTE_NONSECURE)

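/*
 * Flush the page table cache. With a page given, only the PTC line
 * holding the atom-aligned @offset within that page is flushed; with
 * page == NULL the entire PTC is invalidated. On SoCs with more than
 * 32 address bits, the high bits of the physical address go into the
 * separate SMMU_PTC_FLUSH_HI register.
 */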
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

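/*
 * Reading back SMMU_CONFIG ensures that all preceding (posted) register
 * writes have reached the hardware before we continue.
 */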
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	u32 *pd;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = alloc_page(GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* clear PDE usage counters */
	pd = page_address(as->count);
	SetPageReserved(as->count);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free any page tables still installed in the directory */
	ClearPageReserved(as->count);
	__free_page(as->count);
	ClearPageReserved(as->pd);
	__free_page(as->pd);

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

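/*
 * The first time an address space is attached, allocate an ASID for it
 * and program its page directory into the hardware via the indirect
 * SMMU_PTB_ASID/SMMU_PTB_DATA register pair. Subsequent attachments of
 * the same address space only increment its use count.
 */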
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, SMMU_PTB_ASID_VALUE(as->id), SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}

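/*
 * Attach every "iommus" specifier of @dev that targets this SMMU; the
 * first specifier cell selects the swgroup. A (hypothetical) device
 * tree fragment would look roughly like this, assuming the memory
 * controller is the IOMMU provider with #iommu-cells = <1>:
 *
 *	display@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 */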
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

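/*
 * Look up (and, if necessary, lazily allocate) the page table entry for
 * @iova. The PDE index is taken from IOVA bits 31:22 and the PTE index
 * from bits 21:12. A newly allocated page table is zeroed, marked
 * reserved and hooked into the page directory, and the relevant caches
 * are flushed before the entry is handed back.
 */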
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (pd[pde] == 0) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
		pt = page_address(page);
	}

	*pagep = page;

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	if (pt[pte] == 0)
		count[pde]++;

	return &pt[pte];
}

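/*
 * Clear the page table entry for @iova and drop the page table's usage
 * count; once the count reaches zero the page table page is unhooked
 * from the page directory and returned to the system.
 */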
static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd), *pt;
	struct page *page;

	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
	pt = page_address(page);

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (pt[pte] != 0) {
		if (--count[pde] == 0) {
			ClearPageReserved(page);
			__free_page(page);
			pd[pde] = 0;
		}

		pt[pte] = 0;
	}
}

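/*
 * After a PTE is written (or cleared), the CPU cache line containing it
 * must be written back, the PTC line covering it invalidated and the
 * TLB entries for the 16 KiB group around @iova flushed, in that order,
 * before the change becomes visible to the hardware.
 */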
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
	offset = offset_in_page(pte);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	offset = offset_in_page(pte);
	as_put_pte(as, iova);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	unsigned long pfn;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

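/*
 * Resolve an IOMMU phandle to an SMMU instance. The phandle points at
 * the memory controller, whose driver data carries the struct tegra_mc
 * in which tegra_smmu_probe() stashed the SMMU pointer.
 */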
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		of_node_put(args.np);

		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

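/*
 * On SoCs that have an AHB bus (e.g. Tegra30, matched by the compatible
 * below), the AHB must be told that the SMMU is ready before accesses
 * by AHB masters can be translated.
 */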
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "swgroup    enabled  ASID\n");
	seq_puts(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "client       enabled\n");
	seq_puts(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

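/*
 * Intended to be called by the memory controller driver during its own
 * probe; the SMMU shares the MC's register space (note smmu->regs =
 * mc->regs below) rather than being a platform device of its own.
 */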
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU as soon as bus_set_iommu() is called. In
	 * order not to rely on global variables to track the IOMMU instance,
	 * we set it here so that it can be looked up from the .add_device()
	 * callback via the memory controller's driver data (see
	 * tegra_smmu_find()).
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}
857