/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

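/*
 * Two-level page table layout:
 * - The first-level table holds 4096 entries (NUM_LV1ENTRIES), each
 *   covering a 1MiB section of the 32-bit I/O virtual address space.
 * - A first-level entry either maps a 1MiB section directly or points
 *   to a second-level table of 256 entries (NUM_LV2ENTRIES), each
 *   covering a 4KiB small page; 16 consecutive second-level entries
 *   can also describe one 64KiB large page.
 * The two least significant bits of an entry encode its type, which
 * the lv1ent_*()/lv2ent_*() helpers below decode.
 */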
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

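/*
 * REG_MMU_CTRL values. Judging from the constants below and the status
 * polling in sysmmu_block(), bit 0 enables translation, bit 1 requests
 * blocking of incoming transactions and bit 2 enables fault interrupts;
 * this bit layout is inferred from the driver itself, not taken from a
 * datasheet.
 */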
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	(((n) & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
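/*
 * Example: REG_MMU_VERSION keeps the version in its top 11 bits, so a
 * System MMU v3.3 reads back as MMU_RAW_VER(reg) == MAKE_MMU_VER(3, 3)
 * == (3 << 7) | 3 == 0x183, which MMU_MAJ_VER()/MMU_MIN_VER() split
 * back into major 3 and minor 3.
 */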

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		((dev)->archdata.iommu != NULL)

static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;         /* IO virtual memory manager's data */
	spinlock_t lock;        /* Lock to preserve consistency of System MMU */
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/*
	 * Return true if the System MMU was not active previously and it
	 * needs to be initialized.
	 */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* Return true if the System MMU needs to be disabled. */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
{
	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
}

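/*
 * Block the System MMU so that it stops translating while the TLB is
 * flushed or the page table base is changed. REG_MMU_STATUS bit 0
 * apparently reports the blocked state (inferred from this polling
 * loop); if the MMU never reports it within the bounded number of
 * polls, the function unblocks again and returns failure.
 */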
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s (page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

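/*
 * Fault interrupt handler. The System MMU raises the interrupt while it
 * is in the blocked state, so after the fault type is read from
 * REG_INT_STATUS and reported, the handler clears the interrupt and
 * unblocks the MMU to resume translation.
 */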
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is in the blocked state when the interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not raised by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if the IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* The fault was not recovered by the fault handler. */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

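/*
 * Program REG_MMU_CFG according to the capabilities of the probed
 * hardware: the first-level page table descriptor (FLPD) cache exists
 * from v3.2 onwards, v3.2 additionally wants CFG_SYSSEL, and v3.3 sets
 * CFG_ACGEN (presumably automatic clock gating) while dropping CFG_LRU.
 */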
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = __raw_sysmmu_version(data);
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU was not enabled,
 * 0 if the System MMU has just been enabled and 1 if the System MMU was
 * already enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
					    sysmmu_iova_t iova)
{
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq ? irq : -ENODEV;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(dev, "Failed to prepare clk\n");
		return ret;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};

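/*
 * The System MMU's page table walker reads the tables from DRAM and
 * does not snoop the CPU caches, so every page table update must be
 * cleaned from the inner (dmac_flush_range) and outer
 * (outer_flush_range) caches before the hardware walks it.
 */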
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		priv->pgtable[i + 0] = ZERO_LV2LINK;
		priv->pgtable[i + 1] = ZERO_LV2LINK;
		priv->pgtable[i + 2] = ZERO_LV2LINK;
		priv->pgtable[i + 3] = ZERO_LV2LINK;
		priv->pgtable[i + 4] = ZERO_LV2LINK;
		priv->pgtable[i + 5] = ZERO_LV2LINK;
		priv->pgtable[i + 6] = ZERO_LV2LINK;
		priv->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		if (!pent)
			return ERR_PTR(-ENOMEM);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct exynos_iommu_owner *owner;

			spin_lock(&priv->lock);
			list_for_each_entry(owner, &priv->clients, client)
				sysmmu_tlb_invalidate_flpdcache(
							owner->dev, iova);
			spin_unlock(&priv->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	spin_lock(&priv->lock);
	if (lv1ent_page_zero(sent)) {
		struct exynos_iommu_owner *owner;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
	}
	spin_unlock(&priv->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager must take care of the workaround
 * for this problem. The workaround is as follows:
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * More precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
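/*
 * For example, under the v3.3 rules above a region ending at 0x12300000
 * must be followed by a hole of at least 128KiB, so the next region may
 * start no lower than 0x12320000, an address that is itself aligned to
 * 128KiB. (The addresses are illustrative only.)
 */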
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(priv, entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed (%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
						sysmmu_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(priv, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

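/*
 * Callbacks registered with the IOMMU core. pgsize_bitmap advertises
 * the three page sizes the two-level table maps directly: 1MiB
 * sections, 64KiB large pages and 4KiB small pages; the core splits
 * any other mapping request into these sizes before calling
 * .map()/.unmap().
 */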
static const struct iommu_ops exynos_iommu_ops = {
	.domain_init = exynos_iommu_domain_init,
	.domain_destroy = exynos_iommu_domain_destroy,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);