/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

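/*
 * One instance of this structure exists per IOMMU domain: @pd is the
 * page directory page, @pts tracks the page table pages indexed by PDE
 * number, @count tracks the number of PTEs in use within each page
 * table, and @id is the hardware ASID assigned while the address space
 * is attached.
 */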
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned int id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x014
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x018
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
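/*
 * The SMMU uses a two-level page table: a single-page directory of
 * 1024 32-bit PDEs, each pointing to a single-page table of 1024
 * 32-bit PTEs. One PDE therefore maps 4 MiB and the directory covers
 * the entire 32-bit I/O virtual address space.
 */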
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
				 SMMU_PTE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	/* mask off attribute bits and widen before shifting (> 4 GiB safe) */
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

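/*
 * Flush the page table cache (PTC) line holding the page table atom at
 * @dma + @offset. On SoCs with more than 32 address bits, the upper
 * bits of the address are written to the separate SMMU_PTC_FLUSH_HI
 * register first.
 */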
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

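/*
 * Reading back any SMMU register ensures that preceding register
 * writes have been posted to the hardware.
 */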
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/*
	 * All mappings must have been torn down and the address space
	 * detached by the time the domain is freed, so only the backing
	 * allocations remain to be released.
	 */
	kfree(as->pts);
	kfree(as->count);
	__free_page(as->pd);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

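/*
 * Enable translation for every memory client that belongs to @swgroup
 * and point the swgroup's ASID register at the address space
 * identified by @asid.
 */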
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

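/*
 * First user of an address space: map the page directory for DMA,
 * allocate a hardware ASID and program the page table base registers.
 * Subsequent users merely increment the reference count.
 */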
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, SMMU_PTB_ASID_VALUE(as->id), SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

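/*
 * Walk the device's "iommus" property and enable every swgroup that
 * references this SMMU; entries pointing at other IOMMUs are skipped.
 */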
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(as->smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

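/*
 * Return a pointer to the PTE for @iova, allocating and installing a
 * new page table on demand. The DMA address of the page table is
 * returned via @dmap.
 */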
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

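/*
 * Update a single PTE and make the change visible to the hardware:
 * sync the cache line to the device, then flush the affected PTC line
 * and TLB group.
 */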
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 sizeof(*pte), DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

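/*
 * Only 4 KiB mappings are supported (see pgsize_bitmap below), so the
 * IOMMU core will have split larger requests into page-sized calls by
 * the time this is invoked.
 */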
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

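/*
 * Resolve an "iommus" phandle target to an SMMU instance. The phandle
 * points at the memory controller, whose driver data carries the
 * reference to the SMMU.
 */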
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			of_node_put(args.np);
			break;
		}

		of_node_put(args.np);
		index++;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = generic_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "swgroup    enabled  ASID\n");
	seq_puts(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_puts(s, "client       enabled\n");
	seq_puts(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

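/*
 * Note the ordering: the PTC and TLB are configured and flushed before
 * the SMMU is enabled, and the hardware is fully set up before the
 * IOMMU is registered, since bus_set_iommu() will immediately try to
 * add existing platform devices.
 */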
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}
985