xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 675aaf05)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
4  *		http://www.samsung.com
5  */
6 
7 #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10 
11 #include <linux/clk.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/io.h>
15 #include <linux/iommu.h>
16 #include <linux/interrupt.h>
17 #include <linux/kmemleak.h>
18 #include <linux/list.h>
19 #include <linux/of.h>
20 #include <linux/of_iommu.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/dma-iommu.h>
26 
27 typedef u32 sysmmu_iova_t;
28 typedef u32 sysmmu_pte_t;
29 
30 /* We do not consider super section mapping (16MB) */
31 #define SECT_ORDER 20
32 #define LPAGE_ORDER 16
33 #define SPAGE_ORDER 12
34 
35 #define SECT_SIZE (1 << SECT_ORDER)
36 #define LPAGE_SIZE (1 << LPAGE_ORDER)
37 #define SPAGE_SIZE (1 << SPAGE_ORDER)
38 
39 #define SECT_MASK (~(SECT_SIZE - 1))
40 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
41 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
42 
43 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
44 			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
45 #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
46 #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
47 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
48 			  ((*(sent) & 3) == 1))
49 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
50 
51 #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
52 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
53 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
54 
55 /*
56  * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
57  * v5.0 introduced support for 36bit physical address space by shifting
58  * all page entry values by 4 bits.
59  * All SYSMMU controllers in the system support the address spaces of the same
60  * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
61  * value (0 or 4).
62  */
63 static short PG_ENT_SHIFT = -1;
64 #define SYSMMU_PG_ENT_SHIFT 0
65 #define SYSMMU_V5_PG_ENT_SHIFT 4
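
/*
 * Illustrative sketch (not part of the driver): with PG_ENT_SHIFT == 4
 * (SYSMMU v5), a 36-bit physical address fits in a 32-bit page table
 * entry because the low 4 bits (zero for any sufficiently aligned
 * address) are dropped by the shift. The helper below is hypothetical,
 * shown only to clarify the packing scheme described above.
 */
static inline phys_addr_t example_pa_roundtrip(phys_addr_t pa)
{
	u32 ent = (u32)(pa >> PG_ENT_SHIFT);	/* 0x812345000 -> 0x81234500 */

	return (phys_addr_t)ent << PG_ENT_SHIFT; /* back to 0x812345000 */
}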
66 
67 static const sysmmu_pte_t *LV1_PROT;
68 static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
69 	((0 << 15) | (0 << 10)), /* no access */
70 	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
71 	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
72 	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
73 };
74 static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
75 	(0 << 4), /* no access */
76 	(1 << 4), /* IOMMU_READ only */
77 	(2 << 4), /* IOMMU_WRITE only */
78 	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
79 };
80 
81 static const sysmmu_pte_t *LV2_PROT;
82 static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
83 	((0 << 9) | (0 << 4)), /* no access */
84 	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
85 	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
86 	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
87 };
88 static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
89 	(0 << 2), /* no access */
90 	(1 << 2), /* IOMMU_READ only */
91 	(2 << 2), /* IOMMU_WRITE only */
92 	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
93 };
94 
95 #define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
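
/*
 * Illustrative sketch (not part of the driver): IOMMU_READ is BIT(0) and
 * IOMMU_WRITE is BIT(1), so the masked prot flags index the 4-entry
 * protection tables above directly. 'example_lv1_prot' is a hypothetical
 * helper, shown only to clarify how the tables are consumed.
 */
static inline sysmmu_pte_t example_lv1_prot(int prot)
{
	/* index 0: no access, 1: read, 2: write, 3: read/write */
	return LV1_PROT[prot & SYSMMU_SUPPORTED_PROT_BITS];
}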
96 
97 #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
98 #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
99 #define section_offs(iova) (iova & (SECT_SIZE - 1))
100 #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
101 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
102 #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
103 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
104 
105 #define NUM_LV1ENTRIES 4096
106 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
107 
108 static u32 lv1ent_offset(sysmmu_iova_t iova)
109 {
110 	return iova >> SECT_ORDER;
111 }
112 
113 static u32 lv2ent_offset(sysmmu_iova_t iova)
114 {
115 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
116 }
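
/*
 * Illustrative sketch (not part of the driver): how a 32-bit IOVA is
 * split across the two-level page table. The values in the comments are
 * for iova == 0x12345678.
 */
static inline void example_split_iova(sysmmu_iova_t iova,
				      u32 *lv1, u32 *lv2, u32 *offs)
{
	*lv1 = lv1ent_offset(iova);	/* 0x123: index into 4096-entry lv1 */
	*lv2 = lv2ent_offset(iova);	/* 0x45: index into 256-entry lv2 */
	*offs = spage_offs(iova);	/* 0x678: offset within the 4KiB page */
}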
117 
118 #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
119 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
120 
121 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
122 #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
123 
124 #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
125 #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
126 #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
127 #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
128 
129 #define CTRL_ENABLE	0x5
130 #define CTRL_BLOCK	0x7
131 #define CTRL_DISABLE	0x0
132 
133 #define CFG_LRU		0x1
134 #define CFG_EAP		(1 << 2)
135 #define CFG_QOS(n)	((n & 0xF) << 7)
136 #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
137 #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
138 #define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
139 
140 /* common registers */
141 #define REG_MMU_CTRL		0x000
142 #define REG_MMU_CFG		0x004
143 #define REG_MMU_STATUS		0x008
144 #define REG_MMU_VERSION		0x034
145 
146 #define MMU_MAJ_VER(val)	((val) >> 7)
147 #define MMU_MIN_VER(val)	((val) & 0x7F)
148 #define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
149 
150 #define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
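
/*
 * Worked example (documentation only, hypothetical helper): the version
 * sits in bits 31:21 of REG_MMU_VERSION, so a raw value of 0x30400000
 * decodes to MMU_RAW_VER() == 0x182 == MAKE_MMU_VER(3, 2), i.e. v3.2.
 */
static inline bool example_is_v3_2(u32 reg)
{
	unsigned int ver = MMU_RAW_VER(reg);	/* 0x30400000 -> 0x182 */

	return MMU_MAJ_VER(ver) == 3 && MMU_MIN_VER(ver) == 2;
}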
151 
152 /* v1.x - v3.x registers */
153 #define REG_MMU_FLUSH		0x00C
154 #define REG_MMU_FLUSH_ENTRY	0x010
155 #define REG_PT_BASE_ADDR	0x014
156 #define REG_INT_STATUS		0x018
157 #define REG_INT_CLEAR		0x01C
158 
159 #define REG_PAGE_FAULT_ADDR	0x024
160 #define REG_AW_FAULT_ADDR	0x028
161 #define REG_AR_FAULT_ADDR	0x02C
162 #define REG_DEFAULT_SLAVE_ADDR	0x030
163 
164 /* v5.x registers */
165 #define REG_V5_PT_BASE_PFN	0x00C
166 #define REG_V5_MMU_FLUSH_ALL	0x010
167 #define REG_V5_MMU_FLUSH_ENTRY	0x014
168 #define REG_V5_MMU_FLUSH_RANGE	0x018
169 #define REG_V5_MMU_FLUSH_START	0x020
170 #define REG_V5_MMU_FLUSH_END	0x024
171 #define REG_V5_INT_STATUS	0x060
172 #define REG_V5_INT_CLEAR	0x064
173 #define REG_V5_FAULT_AR_VA	0x070
174 #define REG_V5_FAULT_AW_VA	0x080
175 
176 #define has_sysmmu(dev)		(dev->archdata.iommu != NULL)
177 
178 static struct device *dma_dev;
179 static struct kmem_cache *lv2table_kmem_cache;
180 static sysmmu_pte_t *zero_lv2_table;
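/*
 * Unused lv1 entries are not left as plain fault entries; they all point
 * at this single shared, all-fault lv2 table (see the System MMU v3.3
 * FLPD cache workaround notes in alloc_lv2entry() below).
 */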
181 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
182 
183 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
184 {
185 	return pgtable + lv1ent_offset(iova);
186 }
187 
188 static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
189 {
190 	return (sysmmu_pte_t *)phys_to_virt(
191 				lv2table_base(sent)) + lv2ent_offset(iova);
192 }
193 
194 /*
195  * IOMMU fault information register
196  */
197 struct sysmmu_fault_info {
198 	unsigned int bit;	/* bit number in STATUS register */
199 	unsigned short addr_reg; /* register to read VA fault address */
200 	const char *name;	/* human readable fault name */
201 	unsigned int type;	/* fault type for report_iommu_fault */
202 };
203 
204 static const struct sysmmu_fault_info sysmmu_faults[] = {
205 	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
206 	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
207 	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
208 	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
209 	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
210 	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
211 	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
212 	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
213 };
214 
215 static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
216 	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
217 	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
218 	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
219 	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
220 	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
221 	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
222 	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
223 	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
224 	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
225 	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
226 };
227 
228 /*
229  * This structure is attached to dev.archdata.iommu of the master device
230  * on device add. It contains a list of SYSMMU controllers defined by the
231  * device tree which are bound to the given master device. It is usually
232  * referenced by the 'owner' pointer.
233  */
234 struct exynos_iommu_owner {
235 	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
236 	struct iommu_domain *domain;	/* domain this device is attached */
237 	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
238 };
239 
240 /*
241  * This structure is an Exynos-specific generalization of struct iommu_domain.
242  * It contains a list of SYSMMU controllers from all master devices which have
243  * been attached to this domain, as well as the page tables of the IO address
244  * space defined by it. It is usually referenced by the 'domain' pointer.
245  */
246 struct exynos_iommu_domain {
247 	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
248 	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
249 	short *lv2entcnt;	/* free lv2 entry counter for each section */
250 	spinlock_t lock;	/* lock for modifying list of clients */
251 	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
252 	struct iommu_domain domain; /* generic domain data structure */
253 };
254 
255 /*
256  * This structure holds all data of a single SYSMMU controller. This includes
257  * hw resources like registers and clocks, pointers and list nodes to connect
258  * it to all other structures, internal state and parameters read from the
259  * device tree. It is usually referenced by the 'data' pointer.
260  */
261 struct sysmmu_drvdata {
262 	struct device *sysmmu;		/* SYSMMU controller device */
263 	struct device *master;		/* master device (owner) */
264 	struct device_link *link;	/* runtime PM link to master */
265 	void __iomem *sfrbase;		/* our registers */
266 	struct clk *clk;		/* SYSMMU's clock */
267 	struct clk *aclk;		/* SYSMMU's aclk clock */
268 	struct clk *pclk;		/* SYSMMU's pclk clock */
269 	struct clk *clk_master;		/* master's device clock */
270 	spinlock_t lock;		/* lock for modifying state */
271 	bool active;			/* current status */
272 	struct exynos_iommu_domain *domain; /* domain we belong to */
273 	struct list_head domain_node;	/* node for domain clients list */
274 	struct list_head owner_node;	/* node for owner controllers list */
275 	phys_addr_t pgtable;		/* assigned page table structure */
276 	unsigned int version;		/* our version */
277 
278 	struct iommu_device iommu;	/* IOMMU core handle */
279 };
280 
281 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
282 {
283 	return container_of(dom, struct exynos_iommu_domain, domain);
284 }
285 
286 static void sysmmu_unblock(struct sysmmu_drvdata *data)
287 {
288 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
289 }
290 
291 static bool sysmmu_block(struct sysmmu_drvdata *data)
292 {
293 	int i = 120;
294 
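	/*
	 * Request the blocked state, then poll bit 0 of REG_MMU_STATUS for
	 * up to 120 reads; if the MMU never reports blocked, re-enable it
	 * and report failure.
	 */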
295 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
296 	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
297 		--i;
298 
299 	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
300 		sysmmu_unblock(data);
301 		return false;
302 	}
303 
304 	return true;
305 }
306 
307 static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
308 {
309 	if (MMU_MAJ_VER(data->version) < 5)
310 		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
311 	else
312 		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
313 }
314 
315 static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
316 				sysmmu_iova_t iova, unsigned int num_inv)
317 {
318 	unsigned int i;
319 
320 	if (MMU_MAJ_VER(data->version) < 5) {
321 		for (i = 0; i < num_inv; i++) {
322 			writel((iova & SPAGE_MASK) | 1,
323 				     data->sfrbase + REG_MMU_FLUSH_ENTRY);
324 			iova += SPAGE_SIZE;
325 		}
326 	} else {
327 		if (num_inv == 1) {
328 			writel((iova & SPAGE_MASK) | 1,
329 				     data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
330 		} else {
331 			writel((iova & SPAGE_MASK),
332 				     data->sfrbase + REG_V5_MMU_FLUSH_START);
333 			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
334 				     data->sfrbase + REG_V5_MMU_FLUSH_END);
335 			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
336 		}
337 	}
338 }
339 
340 static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
341 {
342 	if (MMU_MAJ_VER(data->version) < 5)
343 		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
344 	else
345 		writel(pgd >> PAGE_SHIFT,
346 			     data->sfrbase + REG_V5_PT_BASE_PFN);
347 
348 	__sysmmu_tlb_invalidate(data);
349 }
350 
351 static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
352 {
353 	BUG_ON(clk_prepare_enable(data->clk_master));
354 	BUG_ON(clk_prepare_enable(data->clk));
355 	BUG_ON(clk_prepare_enable(data->pclk));
356 	BUG_ON(clk_prepare_enable(data->aclk));
357 }
358 
359 static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
360 {
361 	clk_disable_unprepare(data->aclk);
362 	clk_disable_unprepare(data->pclk);
363 	clk_disable_unprepare(data->clk);
364 	clk_disable_unprepare(data->clk_master);
365 }
366 
367 static void __sysmmu_get_version(struct sysmmu_drvdata *data)
368 {
369 	u32 ver;
370 
371 	__sysmmu_enable_clocks(data);
372 
373 	ver = readl(data->sfrbase + REG_MMU_VERSION);
374 
375 	/* controllers on some SoCs don't report proper version */
376 	if (ver == 0x80000001u)
377 		data->version = MAKE_MMU_VER(1, 0);
378 	else
379 		data->version = MMU_RAW_VER(ver);
380 
381 	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
382 		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
383 
384 	__sysmmu_disable_clocks(data);
385 }
386 
387 static void show_fault_information(struct sysmmu_drvdata *data,
388 				   const struct sysmmu_fault_info *finfo,
389 				   sysmmu_iova_t fault_addr)
390 {
391 	sysmmu_pte_t *ent;
392 
393 	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
394 		dev_name(data->master), finfo->name, fault_addr);
395 	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
396 	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
397 	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
398 	if (lv1ent_page(ent)) {
399 		ent = page_entry(ent, fault_addr);
400 		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
401 	}
402 }
403 
404 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
405 {
406 	/* SYSMMU is in the blocked state when an interrupt occurs. */
407 	struct sysmmu_drvdata *data = dev_id;
408 	const struct sysmmu_fault_info *finfo;
409 	unsigned int i, n, itype;
410 	sysmmu_iova_t fault_addr = -1;
411 	unsigned short reg_status, reg_clear;
412 	int ret = -ENOSYS;
413 
414 	WARN_ON(!data->active);
415 
416 	if (MMU_MAJ_VER(data->version) < 5) {
417 		reg_status = REG_INT_STATUS;
418 		reg_clear = REG_INT_CLEAR;
419 		finfo = sysmmu_faults;
420 		n = ARRAY_SIZE(sysmmu_faults);
421 	} else {
422 		reg_status = REG_V5_INT_STATUS;
423 		reg_clear = REG_V5_INT_CLEAR;
424 		finfo = sysmmu_v5_faults;
425 		n = ARRAY_SIZE(sysmmu_v5_faults);
426 	}
427 
428 	spin_lock(&data->lock);
429 
430 	clk_enable(data->clk_master);
431 
432 	itype = __ffs(readl(data->sfrbase + reg_status));
433 	for (i = 0; i < n; i++, finfo++)
434 		if (finfo->bit == itype)
435 			break;
436 	/* unknown/unsupported fault */
437 	BUG_ON(i == n);
438 
439 	/* print debug message */
440 	fault_addr = readl(data->sfrbase + finfo->addr_reg);
441 	show_fault_information(data, finfo, fault_addr);
442 
443 	if (data->domain)
444 		ret = report_iommu_fault(&data->domain->domain,
445 					data->master, fault_addr, finfo->type);
446 	/* fault is not recovered by fault handler */
447 	BUG_ON(ret != 0);
448 
449 	writel(1 << itype, data->sfrbase + reg_clear);
450 
451 	sysmmu_unblock(data);
452 
453 	clk_disable(data->clk_master);
454 
455 	spin_unlock(&data->lock);
456 
457 	return IRQ_HANDLED;
458 }
459 
460 static void __sysmmu_disable(struct sysmmu_drvdata *data)
461 {
462 	unsigned long flags;
463 
464 	clk_enable(data->clk_master);
465 
466 	spin_lock_irqsave(&data->lock, flags);
467 	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
468 	writel(0, data->sfrbase + REG_MMU_CFG);
469 	data->active = false;
470 	spin_unlock_irqrestore(&data->lock, flags);
471 
472 	__sysmmu_disable_clocks(data);
473 }
474 
475 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
476 {
477 	unsigned int cfg;
478 
479 	if (data->version <= MAKE_MMU_VER(3, 1))
480 		cfg = CFG_LRU | CFG_QOS(15);
481 	else if (data->version <= MAKE_MMU_VER(3, 2))
482 		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
483 	else
484 		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
485 
486 	cfg |= CFG_EAP; /* enable access protection bits check */
487 
488 	writel(cfg, data->sfrbase + REG_MMU_CFG);
489 }
490 
491 static void __sysmmu_enable(struct sysmmu_drvdata *data)
492 {
493 	unsigned long flags;
494 
495 	__sysmmu_enable_clocks(data);
496 
497 	spin_lock_irqsave(&data->lock, flags);
498 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
499 	__sysmmu_init_config(data);
500 	__sysmmu_set_ptbase(data, data->pgtable);
501 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
502 	data->active = true;
503 	spin_unlock_irqrestore(&data->lock, flags);
504 
505 	/*
506 	 * The SYSMMU driver keeps the master's clock enabled only for the
507 	 * short time it takes to access the registers. For address
508 	 * translation during DMA transactions it relies on the client
509 	 * driver to enable the clock.
510 	 */
511 	clk_disable(data->clk_master);
512 }
513 
514 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
515 					    sysmmu_iova_t iova)
516 {
517 	unsigned long flags;
518 
519 	spin_lock_irqsave(&data->lock, flags);
520 	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
521 		clk_enable(data->clk_master);
522 		if (sysmmu_block(data)) {
523 			if (data->version >= MAKE_MMU_VER(5, 0))
524 				__sysmmu_tlb_invalidate(data);
525 			else
526 				__sysmmu_tlb_invalidate_entry(data, iova, 1);
527 			sysmmu_unblock(data);
528 		}
529 		clk_disable(data->clk_master);
530 	}
531 	spin_unlock_irqrestore(&data->lock, flags);
532 }
533 
534 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
535 					sysmmu_iova_t iova, size_t size)
536 {
537 	unsigned long flags;
538 
539 	spin_lock_irqsave(&data->lock, flags);
540 	if (data->active) {
541 		unsigned int num_inv = 1;
542 
543 		clk_enable(data->clk_master);
544 
545 		/*
546 		 * L2TLB invalidations required:
547 		 * 4KB page: 1 invalidation
548 		 * 64KB page: 16 invalidations
549 		 * 1MB page: 64 invalidations
550 		 * because the TLB is set-associative,
551 		 * 8-way with 64 sets.
552 		 * A 1MB page can be cached in any of the 64 sets.
553 		 * A 64KB page can be cached in one of 16 consecutive sets.
554 		 */
555 		if (MMU_MAJ_VER(data->version) == 2)
556 			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
557 
558 		if (sysmmu_block(data)) {
559 			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
560 			sysmmu_unblock(data);
561 		}
562 		clk_disable(data->clk_master);
563 	}
564 	spin_unlock_irqrestore(&data->lock, flags);
565 }
566 
567 static const struct iommu_ops exynos_iommu_ops;
568 
569 static int __init exynos_sysmmu_probe(struct platform_device *pdev)
570 {
571 	int irq, ret;
572 	struct device *dev = &pdev->dev;
573 	struct sysmmu_drvdata *data;
574 	struct resource *res;
575 
576 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
577 	if (!data)
578 		return -ENOMEM;
579 
580 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
581 	data->sfrbase = devm_ioremap_resource(dev, res);
582 	if (IS_ERR(data->sfrbase))
583 		return PTR_ERR(data->sfrbase);
584 
585 	irq = platform_get_irq(pdev, 0);
586 	if (irq <= 0) {
587 		dev_err(dev, "Unable to find IRQ resource\n");
588 		return irq ? irq : -EINVAL;
589 	}
590 
591 	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
592 				dev_name(dev), data);
593 	if (ret) {
594 		dev_err(dev, "Unable to register handler for irq %d\n", irq);
595 		return ret;
596 	}
597 
598 	data->clk = devm_clk_get(dev, "sysmmu");
599 	if (PTR_ERR(data->clk) == -ENOENT)
600 		data->clk = NULL;
601 	else if (IS_ERR(data->clk))
602 		return PTR_ERR(data->clk);
603 
604 	data->aclk = devm_clk_get(dev, "aclk");
605 	if (PTR_ERR(data->aclk) == -ENOENT)
606 		data->aclk = NULL;
607 	else if (IS_ERR(data->aclk))
608 		return PTR_ERR(data->aclk);
609 
610 	data->pclk = devm_clk_get(dev, "pclk");
611 	if (PTR_ERR(data->pclk) == -ENOENT)
612 		data->pclk = NULL;
613 	else if (IS_ERR(data->pclk))
614 		return PTR_ERR(data->pclk);
615 
616 	if (!data->clk && (!data->aclk || !data->pclk)) {
617 		dev_err(dev, "Failed to get device clock(s)!\n");
618 		return -ENOSYS;
619 	}
620 
621 	data->clk_master = devm_clk_get(dev, "master");
622 	if (PTR_ERR(data->clk_master) == -ENOENT)
623 		data->clk_master = NULL;
624 	else if (IS_ERR(data->clk_master))
625 		return PTR_ERR(data->clk_master);
626 
627 	data->sysmmu = dev;
628 	spin_lock_init(&data->lock);
629 
630 	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
631 				     dev_name(data->sysmmu));
632 	if (ret)
633 		return ret;
634 
635 	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
636 	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
637 
638 	ret = iommu_device_register(&data->iommu);
639 	if (ret)
640 		return ret;
641 
642 	platform_set_drvdata(pdev, data);
643 
644 	__sysmmu_get_version(data);
645 	if (PG_ENT_SHIFT < 0) {
646 		if (MMU_MAJ_VER(data->version) < 5) {
647 			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
648 			LV1_PROT = SYSMMU_LV1_PROT;
649 			LV2_PROT = SYSMMU_LV2_PROT;
650 		} else {
651 			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
652 			LV1_PROT = SYSMMU_V5_LV1_PROT;
653 			LV2_PROT = SYSMMU_V5_LV2_PROT;
654 		}
655 	}
656 
657 	/*
658 	 * Use the first registered sysmmu device for performing
659 	 * DMA mapping operations on IOMMU page tables (CPU cache flush).
660 	 */
661 	if (!dma_dev)
662 		dma_dev = &pdev->dev;
663 
664 	pm_runtime_enable(dev);
665 
666 	return 0;
667 }
668 
669 static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
670 {
671 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
672 	struct device *master = data->master;
673 
674 	if (master) {
675 		struct exynos_iommu_owner *owner = master->archdata.iommu;
676 
677 		mutex_lock(&owner->rpm_lock);
678 		if (data->domain) {
679 			dev_dbg(data->sysmmu, "saving state\n");
680 			__sysmmu_disable(data);
681 		}
682 		mutex_unlock(&owner->rpm_lock);
683 	}
684 	return 0;
685 }
686 
687 static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
688 {
689 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
690 	struct device *master = data->master;
691 
692 	if (master) {
693 		struct exynos_iommu_owner *owner = master->archdata.iommu;
694 
695 		mutex_lock(&owner->rpm_lock);
696 		if (data->domain) {
697 			dev_dbg(data->sysmmu, "restoring state\n");
698 			__sysmmu_enable(data);
699 		}
700 		mutex_unlock(&owner->rpm_lock);
701 	}
702 	return 0;
703 }
704 
705 static const struct dev_pm_ops sysmmu_pm_ops = {
706 	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
707 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
708 				pm_runtime_force_resume)
709 };
710 
711 static const struct of_device_id sysmmu_of_match[] = {
712 	{ .compatible	= "samsung,exynos-sysmmu", },
713 	{ },
714 };
715 
716 static struct platform_driver exynos_sysmmu_driver __refdata = {
717 	.probe	= exynos_sysmmu_probe,
718 	.driver	= {
719 		.name		= "exynos-sysmmu",
720 		.of_match_table	= sysmmu_of_match,
721 		.pm		= &sysmmu_pm_ops,
722 		.suppress_bind_attrs = true,
723 	}
724 };
725 
726 static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
727 {
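	/*
	 * The page tables live in cacheable memory but are read by the
	 * SYSMMU via DMA, so bracket the store with cache maintenance to
	 * make sure the table walker never observes a stale entry.
	 */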
728 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
729 				DMA_TO_DEVICE);
730 	*ent = cpu_to_le32(val);
731 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
732 				   DMA_TO_DEVICE);
733 }
734 
735 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
736 {
737 	struct exynos_iommu_domain *domain;
738 	dma_addr_t handle;
739 	int i;
740 
741 	/* Check if correct PTE offsets are initialized */
742 	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
743 
744 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
745 	if (!domain)
746 		return NULL;
747 
748 	if (type == IOMMU_DOMAIN_DMA) {
749 		if (iommu_get_dma_cookie(&domain->domain) != 0)
750 			goto err_pgtable;
751 	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
752 		goto err_pgtable;
753 	}
754 
755 	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
756 	if (!domain->pgtable)
757 		goto err_dma_cookie;
758 
759 	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
760 	if (!domain->lv2entcnt)
761 		goto err_counter;
762 
763 	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
764 	for (i = 0; i < NUM_LV1ENTRIES; i++)
765 		domain->pgtable[i] = ZERO_LV2LINK;
766 
767 	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
768 				DMA_TO_DEVICE);
769 	if (dma_mapping_error(dma_dev, handle))
770 		goto err_lv2ent;
771 	/* For mapping page table entries we rely on dma == phys */
772 	BUG_ON(handle != virt_to_phys(domain->pgtable));
773 
774 	spin_lock_init(&domain->lock);
775 	spin_lock_init(&domain->pgtablelock);
776 	INIT_LIST_HEAD(&domain->clients);
777 
778 	domain->domain.geometry.aperture_start = 0;
779 	domain->domain.geometry.aperture_end   = ~0UL;
780 	domain->domain.geometry.force_aperture = true;
781 
782 	return &domain->domain;
783 
784 err_lv2ent:
785 	free_pages((unsigned long)domain->lv2entcnt, 1);
786 err_counter:
787 	free_pages((unsigned long)domain->pgtable, 2);
788 err_dma_cookie:
789 	if (type == IOMMU_DOMAIN_DMA)
790 		iommu_put_dma_cookie(&domain->domain);
791 err_pgtable:
792 	kfree(domain);
793 	return NULL;
794 }
795 
796 static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
797 {
798 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
799 	struct sysmmu_drvdata *data, *next;
800 	unsigned long flags;
801 	int i;
802 
803 	WARN_ON(!list_empty(&domain->clients));
804 
805 	spin_lock_irqsave(&domain->lock, flags);
806 
807 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
808 		spin_lock(&data->lock);
809 		__sysmmu_disable(data);
810 		data->pgtable = 0;
811 		data->domain = NULL;
812 		list_del_init(&data->domain_node);
813 		spin_unlock(&data->lock);
814 	}
815 
816 	spin_unlock_irqrestore(&domain->lock, flags);
817 
818 	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
819 		iommu_put_dma_cookie(iommu_domain);
820 
821 	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
822 			 DMA_TO_DEVICE);
823 
824 	for (i = 0; i < NUM_LV1ENTRIES; i++)
825 		if (lv1ent_page(domain->pgtable + i)) {
826 			phys_addr_t base = lv2table_base(domain->pgtable + i);
827 
828 			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
829 					 DMA_TO_DEVICE);
830 			kmem_cache_free(lv2table_kmem_cache,
831 					phys_to_virt(base));
832 		}
833 
834 	free_pages((unsigned long)domain->pgtable, 2);
835 	free_pages((unsigned long)domain->lv2entcnt, 1);
836 	kfree(domain);
837 }
838 
839 static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
840 				    struct device *dev)
841 {
842 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
843 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
844 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
845 	struct sysmmu_drvdata *data, *next;
846 	unsigned long flags;
847 
848 	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
849 		return;
850 
851 	mutex_lock(&owner->rpm_lock);
852 
853 	list_for_each_entry(data, &owner->controllers, owner_node) {
854 		pm_runtime_get_noresume(data->sysmmu);
855 		if (pm_runtime_active(data->sysmmu))
856 			__sysmmu_disable(data);
857 		pm_runtime_put(data->sysmmu);
858 	}
859 
860 	spin_lock_irqsave(&domain->lock, flags);
861 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
862 		spin_lock(&data->lock);
863 		data->pgtable = 0;
864 		data->domain = NULL;
865 		list_del_init(&data->domain_node);
866 		spin_unlock(&data->lock);
867 	}
868 	owner->domain = NULL;
869 	spin_unlock_irqrestore(&domain->lock, flags);
870 
871 	mutex_unlock(&owner->rpm_lock);
872 
873 	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
874 		&pagetable);
875 }
876 
877 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
878 				   struct device *dev)
879 {
880 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
881 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
882 	struct sysmmu_drvdata *data;
883 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
884 	unsigned long flags;
885 
886 	if (!has_sysmmu(dev))
887 		return -ENODEV;
888 
889 	if (owner->domain)
890 		exynos_iommu_detach_device(owner->domain, dev);
891 
892 	mutex_lock(&owner->rpm_lock);
893 
894 	spin_lock_irqsave(&domain->lock, flags);
895 	list_for_each_entry(data, &owner->controllers, owner_node) {
896 		spin_lock(&data->lock);
897 		data->pgtable = pagetable;
898 		data->domain = domain;
899 		list_add_tail(&data->domain_node, &domain->clients);
900 		spin_unlock(&data->lock);
901 	}
902 	owner->domain = iommu_domain;
903 	spin_unlock_irqrestore(&domain->lock, flags);
904 
905 	list_for_each_entry(data, &owner->controllers, owner_node) {
906 		pm_runtime_get_noresume(data->sysmmu);
907 		if (pm_runtime_active(data->sysmmu))
908 			__sysmmu_enable(data);
909 		pm_runtime_put(data->sysmmu);
910 	}
911 
912 	mutex_unlock(&owner->rpm_lock);
913 
914 	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
915 		&pagetable);
916 
917 	return 0;
918 }
919 
920 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
921 		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
922 {
923 	if (lv1ent_section(sent)) {
924 		WARN(1, "Trying to map on %#08x that is mapped with a 1MiB page", iova);
925 		return ERR_PTR(-EADDRINUSE);
926 	}
927 
928 	if (lv1ent_fault(sent)) {
929 		dma_addr_t handle;
930 		sysmmu_pte_t *pent;
931 		bool need_flush_flpd_cache = lv1ent_zero(sent);
932 
933 		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
934 		if (!pent)
935 			return ERR_PTR(-ENOMEM);
936 		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
937 
938 		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
939 		kmemleak_ignore(pent);
940 		*pgcounter = NUM_LV2ENTRIES;
941 		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
942 					DMA_TO_DEVICE);
943 		if (dma_mapping_error(dma_dev, handle)) {
944 			kmem_cache_free(lv2table_kmem_cache, pent);
945 			return ERR_PTR(-ENOMEM);
946 		}
947 
948 		/*
949 		 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
950 		 * the FLPD cache may cache the address of zero_l2_table. This
951 		 * function replaces the zero_l2_table with a new L2 page table
952 		 * to write valid mappings.
953 		 * Accessing the valid area may cause a page fault since the
954 		 * FLPD cache may still cache zero_l2_table for the valid area
955 		 * instead of the new L2 page table that has the mapping
956 		 * information of the valid area.
957 		 * Thus any replacement of zero_l2_table with another valid L2
958 		 * page table must involve FLPD cache invalidation for System
959 		 * MMU v3.3.
960 		 * FLPD cache invalidation is performed with TLB invalidation
961 		 * by VPN without blocking. It is safe to invalidate the TLB
962 		 * without blocking because the target address of the TLB
963 		 * invalidation is not currently mapped.
964 		 */
965 		if (need_flush_flpd_cache) {
966 			struct sysmmu_drvdata *data;
967 
968 			spin_lock(&domain->lock);
969 			list_for_each_entry(data, &domain->clients, domain_node)
970 				sysmmu_tlb_invalidate_flpdcache(data, iova);
971 			spin_unlock(&domain->lock);
972 		}
973 	}
974 
975 	return page_entry(sent, iova);
976 }
977 
978 static int lv1set_section(struct exynos_iommu_domain *domain,
979 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
980 			  phys_addr_t paddr, int prot, short *pgcnt)
981 {
982 	if (lv1ent_section(sent)) {
983 		WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
984 			iova);
985 		return -EADDRINUSE;
986 	}
987 
988 	if (lv1ent_page(sent)) {
989 		if (*pgcnt != NUM_LV2ENTRIES) {
990 			WARN(1, "Trying to map 1MiB@%#08x that is already mapped",
991 				iova);
992 			return -EADDRINUSE;
993 		}
994 
995 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
996 		*pgcnt = 0;
997 	}
998 
999 	update_pte(sent, mk_lv1ent_sect(paddr, prot));
1000 
1001 	spin_lock(&domain->lock);
1002 	if (lv1ent_page_zero(sent)) {
1003 		struct sysmmu_drvdata *data;
1004 		/*
1005 		 * Flush the FLPD cache of System MMU v3.3, which may have cached
1006 		 * a FLPD entry via speculative prefetch of a SLPD with no mapping.
1007 		 */
1008 		list_for_each_entry(data, &domain->clients, domain_node)
1009 			sysmmu_tlb_invalidate_flpdcache(data, iova);
1010 	}
1011 	spin_unlock(&domain->lock);
1012 
1013 	return 0;
1014 }
1015 
1016 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
1017 		       int prot, short *pgcnt)
1018 {
1019 	if (size == SPAGE_SIZE) {
1020 		if (WARN_ON(!lv2ent_fault(pent)))
1021 			return -EADDRINUSE;
1022 
1023 		update_pte(pent, mk_lv2ent_spage(paddr, prot));
1024 		*pgcnt -= 1;
1025 	} else { /* size == LPAGE_SIZE */
1026 		int i;
1027 		dma_addr_t pent_base = virt_to_phys(pent);
1028 
1029 		dma_sync_single_for_cpu(dma_dev, pent_base,
1030 					sizeof(*pent) * SPAGES_PER_LPAGE,
1031 					DMA_TO_DEVICE);
1032 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
1033 			if (WARN_ON(!lv2ent_fault(pent))) {
1034 				if (i > 0)
1035 					memset(pent - i, 0, sizeof(*pent) * i);
1036 				return -EADDRINUSE;
1037 			}
1038 
1039 			*pent = mk_lv2ent_lpage(paddr, prot);
1040 		}
1041 		dma_sync_single_for_device(dma_dev, pent_base,
1042 					   sizeof(*pent) * SPAGES_PER_LPAGE,
1043 					   DMA_TO_DEVICE);
1044 		*pgcnt -= SPAGES_PER_LPAGE;
1045 	}
1046 
1047 	return 0;
1048 }
1049 
1050 /*
1051  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1052  *
1053  * System MMU v3.x has advanced logic to improve address translation
1054  * performance with caching more page table entries by a page table walk.
1055  * However, the logic has a bug that while caching faulty page table entries,
1056  * System MMU reports page fault if the cached fault entry is hit even though
1057  * the fault entry is updated to a valid entry after the entry is cached.
1058  * To prevent caching faulty page table entries which may be updated to valid
1059  * entries later, the virtual memory manager should care about the workaround
1060  * for the problem. The following describes the workaround.
1061  *
1062  * Any two consecutive I/O virtual address regions must have a hole of at
1063  * least 128KiB to prevent misbehavior of System MMU 3.x (h/w bug workaround).
1064  *
1065  * Precisely, any start address of I/O virtual region must be aligned with
1066  * the following sizes for System MMU v3.1 and v3.2.
1067  * System MMU v3.1: 128KiB
1068  * System MMU v3.2: 256KiB
1069  *
1070  * Because System MMU v3.3 caches page table entries more aggressively, it needs
1071  * more workarounds.
1072  * - Any two consecutive I/O virtual regions must have a hole of size larger
1073  *   than or equal to 128KiB.
1074  * - Start address of an I/O virtual region must be aligned by 128KiB.
1075  */
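
/*
 * Minimal sketch of the workaround above (not part of the driver): an I/O
 * virtual memory manager could align every region start to the size the
 * detected SYSMMU version requires. The helper and the constant below are
 * hypothetical, shown only to make the alignment rule concrete; regions
 * must additionally be separated by a hole of at least 128KiB on v3.3.
 */
static inline sysmmu_iova_t example_align_region_start(sysmmu_iova_t start)
{
	/* 128KiB for v3.1/v3.3; v3.2 requires 256KiB (0x40000) */
	return ALIGN(start, 0x20000);
}
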
1076 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1077 			    unsigned long l_iova, phys_addr_t paddr, size_t size,
1078 			    int prot)
1079 {
1080 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1081 	sysmmu_pte_t *entry;
1082 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1083 	unsigned long flags;
1084 	int ret = -ENOMEM;
1085 
1086 	BUG_ON(domain->pgtable == NULL);
1087 	prot &= SYSMMU_SUPPORTED_PROT_BITS;
1088 
1089 	spin_lock_irqsave(&domain->pgtablelock, flags);
1090 
1091 	entry = section_entry(domain->pgtable, iova);
1092 
1093 	if (size == SECT_SIZE) {
1094 		ret = lv1set_section(domain, entry, iova, paddr, prot,
1095 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
1096 	} else {
1097 		sysmmu_pte_t *pent;
1098 
1099 		pent = alloc_lv2entry(domain, entry, iova,
1100 				      &domain->lv2entcnt[lv1ent_offset(iova)]);
1101 
1102 		if (IS_ERR(pent))
1103 			ret = PTR_ERR(pent);
1104 		else
1105 			ret = lv2set_page(pent, paddr, size, prot,
1106 				       &domain->lv2entcnt[lv1ent_offset(iova)]);
1107 	}
1108 
1109 	if (ret)
1110 		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
1111 			__func__, ret, size, iova);
1112 
1113 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1114 
1115 	return ret;
1116 }
1117 
1118 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1119 					      sysmmu_iova_t iova, size_t size)
1120 {
1121 	struct sysmmu_drvdata *data;
1122 	unsigned long flags;
1123 
1124 	spin_lock_irqsave(&domain->lock, flags);
1125 
1126 	list_for_each_entry(data, &domain->clients, domain_node)
1127 		sysmmu_tlb_invalidate_entry(data, iova, size);
1128 
1129 	spin_unlock_irqrestore(&domain->lock, flags);
1130 }
1131 
1132 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1133 				 unsigned long l_iova, size_t size)
1134 {
1135 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1136 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1137 	sysmmu_pte_t *ent;
1138 	size_t err_pgsize;
1139 	unsigned long flags;
1140 
1141 	BUG_ON(domain->pgtable == NULL);
1142 
1143 	spin_lock_irqsave(&domain->pgtablelock, flags);
1144 
1145 	ent = section_entry(domain->pgtable, iova);
1146 
1147 	if (lv1ent_section(ent)) {
1148 		if (WARN_ON(size < SECT_SIZE)) {
1149 			err_pgsize = SECT_SIZE;
1150 			goto err;
1151 		}
1152 
1153 		/* workaround for h/w bug in System MMU v3.3 */
1154 		update_pte(ent, ZERO_LV2LINK);
1155 		size = SECT_SIZE;
1156 		goto done;
1157 	}
1158 
1159 	if (unlikely(lv1ent_fault(ent))) {
1160 		if (size > SECT_SIZE)
1161 			size = SECT_SIZE;
1162 		goto done;
1163 	}
1164 
1165 	/* lv1ent_page(ent) == true here */
1166 
1167 	ent = page_entry(ent, iova);
1168 
1169 	if (unlikely(lv2ent_fault(ent))) {
1170 		size = SPAGE_SIZE;
1171 		goto done;
1172 	}
1173 
1174 	if (lv2ent_small(ent)) {
1175 		update_pte(ent, 0);
1176 		size = SPAGE_SIZE;
1177 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1178 		goto done;
1179 	}
1180 
1181 	/* lv2ent_large(ent) == true here */
1182 	if (WARN_ON(size < LPAGE_SIZE)) {
1183 		err_pgsize = LPAGE_SIZE;
1184 		goto err;
1185 	}
1186 
1187 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
1188 				sizeof(*ent) * SPAGES_PER_LPAGE,
1189 				DMA_TO_DEVICE);
1190 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1191 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
1192 				   sizeof(*ent) * SPAGES_PER_LPAGE,
1193 				   DMA_TO_DEVICE);
1194 	size = LPAGE_SIZE;
1195 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1196 done:
1197 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1198 
1199 	exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1200 
1201 	return size;
1202 err:
1203 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1204 
1205 	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
1206 		__func__, size, iova, err_pgsize);
1207 
1208 	return 0;
1209 }
1210 
1211 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1212 					  dma_addr_t iova)
1213 {
1214 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1215 	sysmmu_pte_t *entry;
1216 	unsigned long flags;
1217 	phys_addr_t phys = 0;
1218 
1219 	spin_lock_irqsave(&domain->pgtablelock, flags);
1220 
1221 	entry = section_entry(domain->pgtable, iova);
1222 
1223 	if (lv1ent_section(entry)) {
1224 		phys = section_phys(entry) + section_offs(iova);
1225 	} else if (lv1ent_page(entry)) {
1226 		entry = page_entry(entry, iova);
1227 
1228 		if (lv2ent_large(entry))
1229 			phys = lpage_phys(entry) + lpage_offs(iova);
1230 		else if (lv2ent_small(entry))
1231 			phys = spage_phys(entry) + spage_offs(iova);
1232 	}
1233 
1234 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1235 
1236 	return phys;
1237 }
1238 
1239 static int exynos_iommu_add_device(struct device *dev)
1240 {
1241 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
1242 	struct sysmmu_drvdata *data;
1243 	struct iommu_group *group;
1244 
1245 	if (!has_sysmmu(dev))
1246 		return -ENODEV;
1247 
1248 	group = iommu_group_get_for_dev(dev);
1249 
1250 	if (IS_ERR(group))
1251 		return PTR_ERR(group);
1252 
1253 	list_for_each_entry(data, &owner->controllers, owner_node) {
1254 		/*
1255 		 * SYSMMU will be runtime activated via device link
1256 		 * (dependency) to its master device, so there are no
1257 		 * direct calls to pm_runtime_get/put in this driver.
1258 		 */
1259 		data->link = device_link_add(dev, data->sysmmu,
1260 					     DL_FLAG_STATELESS |
1261 					     DL_FLAG_PM_RUNTIME);
1262 	}
1263 	iommu_group_put(group);
1264 
1265 	return 0;
1266 }
1267 
1268 static void exynos_iommu_remove_device(struct device *dev)
1269 {
1270 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
1271 	struct sysmmu_drvdata *data;
1272 
1273 	if (!has_sysmmu(dev))
1274 		return;
1275 
1276 	if (owner->domain) {
1277 		struct iommu_group *group = iommu_group_get(dev);
1278 
1279 		if (group) {
1280 			WARN_ON(owner->domain !=
1281 				iommu_group_default_domain(group));
1282 			exynos_iommu_detach_device(owner->domain, dev);
1283 			iommu_group_put(group);
1284 		}
1285 	}
1286 	iommu_group_remove_device(dev);
1287 
1288 	list_for_each_entry(data, &owner->controllers, owner_node)
1289 		device_link_del(data->link);
1290 }
1291 
1292 static int exynos_iommu_of_xlate(struct device *dev,
1293 				 struct of_phandle_args *spec)
1294 {
1295 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
1296 	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1297 	struct sysmmu_drvdata *data, *entry;
1298 
1299 	if (!sysmmu)
1300 		return -ENODEV;
1301 
1302 	data = platform_get_drvdata(sysmmu);
1303 	if (!data)
1304 		return -ENODEV;
1305 
1306 	if (!owner) {
1307 		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1308 		if (!owner)
1309 			return -ENOMEM;
1310 
1311 		INIT_LIST_HEAD(&owner->controllers);
1312 		mutex_init(&owner->rpm_lock);
1313 		dev->archdata.iommu = owner;
1314 	}
1315 
1316 	list_for_each_entry(entry, &owner->controllers, owner_node)
1317 		if (entry == data)
1318 			return 0;
1319 
1320 	list_add_tail(&data->owner_node, &owner->controllers);
1321 	data->master = dev;
1322 
1323 	return 0;
1324 }
1325 
1326 static const struct iommu_ops exynos_iommu_ops = {
1327 	.domain_alloc = exynos_iommu_domain_alloc,
1328 	.domain_free = exynos_iommu_domain_free,
1329 	.attach_dev = exynos_iommu_attach_device,
1330 	.detach_dev = exynos_iommu_detach_device,
1331 	.map = exynos_iommu_map,
1332 	.unmap = exynos_iommu_unmap,
1333 	.iova_to_phys = exynos_iommu_iova_to_phys,
1334 	.device_group = generic_device_group,
1335 	.add_device = exynos_iommu_add_device,
1336 	.remove_device = exynos_iommu_remove_device,
1337 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1338 	.of_xlate = exynos_iommu_of_xlate,
1339 };
1340 
1341 static int __init exynos_iommu_init(void)
1342 {
1343 	struct device_node *np;
1344 	int ret;
1345 
1346 	np = of_find_matching_node(NULL, sysmmu_of_match);
1347 	if (!np)
1348 		return 0;
1349 
1350 	of_node_put(np);
1351 
1352 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1353 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1354 	if (!lv2table_kmem_cache) {
1355 		pr_err("%s: Failed to create kmem cache\n", __func__);
1356 		return -ENOMEM;
1357 	}
1358 
1359 	ret = platform_driver_register(&exynos_sysmmu_driver);
1360 	if (ret) {
1361 		pr_err("%s: Failed to register driver\n", __func__);
1362 		goto err_reg_driver;
1363 	}
1364 
1365 	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1366 	if (zero_lv2_table == NULL) {
1367 		pr_err("%s: Failed to allocate zero level2 page table\n",
1368 			__func__);
1369 		ret = -ENOMEM;
1370 		goto err_zero_lv2;
1371 	}
1372 
1373 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1374 	if (ret) {
1375 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1376 								__func__);
1377 		goto err_set_iommu;
1378 	}
1379 
1380 	return 0;
1381 err_set_iommu:
1382 	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1383 err_zero_lv2:
1384 	platform_driver_unregister(&exynos_sysmmu_driver);
1385 err_reg_driver:
1386 	kmem_cache_destroy(lv2table_kmem_cache);
1387 	return ret;
1388 }
1389 core_initcall(exynos_iommu_init);
1390