// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
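
/*
 * The low two bits of each entry encode its type, as tested by the macros
 * above: at lv1, type 1 is a link to an lv2 table and type 2 is a 1MiB
 * section, while types 0 and 3 are faults; at lv2, type 1 is a 64KiB large
 * page, bit 1 set means a 4KiB small page, and type 0 is a fault. As an
 * illustrative example on v1-v3 hardware (where PG_ENT_SHIFT is 0), a
 * section entry for physical address 0x40000000 holds 0x40000002 plus the
 * protection bits defined below.
 */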

/*
 * The v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized to the proper value (0 or 4) on
 * the first SYSMMU probe.
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
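
/*
 * Illustrative example of the two-level index split: for IOVA 0x12345678,
 * lv1ent_offset() returns 0x123 (bits 31:20) and lv2ent_offset() returns
 * 0x45 (bits 19:12), while the remaining bits 11:0 are the in-page offset.
 */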

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
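
/*
 * The mk_* helpers combine the (possibly shifted) physical address, the
 * protection bits and the entry type. On v5+ hardware PG_ENT_SHIFT is 4,
 * which is how a 36-bit physical address fits into a 32-bit entry: e.g. a
 * hypothetical physical address 0x840000000 is stored as 0x84000000 and
 * recovered by sect_to_phys() shifting it back up.
 */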

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE			BIT(0)
#define CTRL_VM_FAULT_MODE_STALL	BIT(3)
#define CAPA0_CAPA1_EXIST		BIT(11)
#define CAPA1_VCR_ENABLED		BIT(14)

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

/* v7.x registers */
#define REG_V7_CAPA0		0x870
#define REG_V7_CAPA1		0x874
#define REG_V7_CTRL_VM		0x8000

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
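
/*
 * zero_lv2_table is a single, shared lv2 table whose entries are all faults.
 * Unused lv1 slots are pointed at it (via ZERO_LV2LINK below) instead of
 * holding a plain fault entry, as part of the workaround for v3.3 FLPD
 * cache prefetching described in alloc_lv2entry() and lv1set_section().
 */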
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device when a
 * device is added. It contains the list of SYSMMU controllers defined by the
 * device tree which are bound to the given master device, and is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is an Exynos specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices which
 * have been attached to this domain, as well as the page tables of the I/O
 * address space defined by it. It is usually referenced by the 'domain'
 * pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying the list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
};

/*
 * This structure holds all the data of a single SYSMMU controller. This
 * includes hardware resources like registers and clocks, the pointers and
 * list nodes that connect it to all other structures, internal state and
 * parameters read from the device tree. It is usually referenced by the
 * 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
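
/*
 * For example, SYSMMU_REG(data, flush_all) resolves to sfrbase + 0x0c on
 * v1-v3 hardware and to sfrbase + 0x10 on v5, per the variant tables below,
 * so the same code path can drive every supported register layout.
 */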

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,
};

/* SysMMU v5 and v7 (non-VM capable) */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
};

/* SysMMU v7: VM capable register set */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base	= 0x800c,
	.flush_all	= 0x8010,
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,
	.flush_end	= 0x8024,
	.int_status	= 0x60,
	.int_clear	= 0x64,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

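/*
 * Blocking stalls address translation while the registers are updated;
 * sysmmu_block() polls bit 0 of REG_MMU_STATUS for up to 120 reads and
 * backs out (unblocking again) if the hardware never reports the blocked
 * state.
 */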
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

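/*
 * v1-v3 hardware takes the page table base as a plain physical address,
 * while v5+ takes it shifted right by SPAGE_ORDER (i.e. the physical frame
 * number), which is what allows a 36-bit base to fit into the register.
 */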
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

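/*
 * Reads the version register and picks the matching register layout: raw
 * versions are encoded in bits 31:21 of REG_MMU_VERSION as 4 bits of major
 * and 7 bits of minor version (see MMU_RAW_VER/MAKE_MMU_VER above), so e.g.
 * MAKE_MMU_VER(3, 3) == 0x183.
 */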
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report a proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v5_variant;
	}

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is in a blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* the fault was not recovered by the fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, SYSMMU_REG(data, int_clear));

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

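/*
 * The enable sequence is: block translation, program the configuration and
 * page table base, enable the v7 VM interface when present, and only then
 * flip REG_MMU_CTRL to CTRL_ENABLE so translation starts from a consistent
 * state.
 */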
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for short
	 * periods while accessing the registers. For address translation
	 * during DMA transactions it relies on the client driver to keep
	 * the clock enabled.
	 */
	clk_disable(data->clk_master);
}

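/*
 * Invalidates the first-level page table (FLPD) cache for the given IOVA on
 * controllers that have one (v3.3+). On v5+ hardware the whole TLB is
 * flushed instead of a single entry.
 */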
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB with 8 ways and
		 * 64 sets. A 1MB page can be cached in any of the sets;
		 * a 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler for irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * Use the first registered sysmmu device for performing DMA mapping
	 * operations on the IOMMU page tables (CPU cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

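/*
 * Page tables live in normal, CPU-cacheable memory, so every PTE update is
 * bracketed by dma_sync_single_for_cpu()/_for_device() against dma_dev (the
 * first probed SYSMMU) to make the write visible to the hardware page table
 * walker, which does not snoop the CPU caches.
 */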
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

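/*
 * Detaching disables every SYSMMU serving the master (when runtime active),
 * then unlinks the controllers from the domain's client list under the
 * domain lock.
 */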
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

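/*
 * Attaching registers every SYSMMU serving the master as a client of the
 * domain and points the hardware at the domain's page table; a device that
 * is already attached elsewhere is detached from its old domain first.
 */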
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				   struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		if (!pent)
			return ERR_PTR(-ENOMEM);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager must implement the workaround
 * for this problem, described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

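/*
 * Unmapping determines the size of the existing mapping at the given IOVA
 * (section, large page or small page), clears the entries, restores
 * ZERO_LV2LINK for sections (the v3.3 workaround), updates the free lv2
 * entry counter and finally invalidates the TLBs of all attached SYSMMUs.
 */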
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

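/*
 * Translates an IOVA to a physical address by walking the page table in
 * software, mirroring what the hardware page table walker does.
 */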
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

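/*
 * Called once per "iommus" phandle in the master's device tree node: the
 * first call allocates the exynos_iommu_owner structure for the master, and
 * every call links the referenced SYSMMU controller into owner->controllers
 * (skipping duplicates).
 */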
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= exynos_iommu_attach_device,
		.detach_dev	= exynos_iommu_detach_device,
		.map		= exynos_iommu_map,
		.unmap		= exynos_iommu_unmap,
		.iova_to_phys	= exynos_iommu_iova_to_phys,
		.free		= exynos_iommu_domain_free,
	}
};

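/*
 * lv2 tables must be LV2TABLE_SIZE (1KiB) aligned so that the BUG_ON check
 * in alloc_lv2entry() holds and lv2table_base() can recover the table
 * address by masking an lv1 entry; the kmem cache below is therefore
 * created with LV2TABLE_SIZE as both object size and alignment.
 */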
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);