xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 7fde9d6e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
4  *		http://www.samsung.com
5  */
6 
7 #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10 
11 #include <linux/clk.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/io.h>
15 #include <linux/iommu.h>
16 #include <linux/interrupt.h>
17 #include <linux/kmemleak.h>
18 #include <linux/list.h>
19 #include <linux/of.h>
20 #include <linux/of_iommu.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/dma-iommu.h>
26 
27 typedef u32 sysmmu_iova_t;
28 typedef u32 sysmmu_pte_t;
29 
30 /* We do not consider super section mapping (16MB) */
31 #define SECT_ORDER 20
32 #define LPAGE_ORDER 16
33 #define SPAGE_ORDER 12
34 
35 #define SECT_SIZE (1 << SECT_ORDER)
36 #define LPAGE_SIZE (1 << LPAGE_ORDER)
37 #define SPAGE_SIZE (1 << SPAGE_ORDER)
38 
39 #define SECT_MASK (~(SECT_SIZE - 1))
40 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
41 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
42 
43 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
44 			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
45 #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
46 #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
47 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
48 			  ((*(sent) & 3) == 1))
49 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
50 
51 #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
52 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
53 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
54 
55 /*
56  * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
57  * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
58  * v5.0 introduced support for a 36bit physical address space by shifting
59  * all page entry values by 4 bits.
60  * All SYSMMU controllers in the system support address spaces of the same
61  * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
62  * proper value (0 or 4).
62  */
63 static short PG_ENT_SHIFT = -1;
64 #define SYSMMU_PG_ENT_SHIFT 0
65 #define SYSMMU_V5_PG_ENT_SHIFT 4
66 
67 static const sysmmu_pte_t *LV1_PROT;
68 static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
69 	((0 << 15) | (0 << 10)), /* no access */
70 	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
71 	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
72 	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
73 };
74 static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
75 	(0 << 4), /* no access */
76 	(1 << 4), /* IOMMU_READ only */
77 	(2 << 4), /* IOMMU_WRITE only */
78 	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
79 };
80 
81 static const sysmmu_pte_t *LV2_PROT;
82 static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
83 	((0 << 9) | (0 << 4)), /* no access */
84 	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
85 	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
86 	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
87 };
88 static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
89 	(0 << 2), /* no access */
90 	(1 << 2), /* IOMMU_READ only */
91 	(2 << 2), /* IOMMU_WRITE only */
92 	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
93 };
94 
95 #define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
96 
97 #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
98 #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
99 #define section_offs(iova) (iova & (SECT_SIZE - 1))
100 #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
101 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
102 #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
103 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
104 
105 #define NUM_LV1ENTRIES 4096
106 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
107 
108 static u32 lv1ent_offset(sysmmu_iova_t iova)
109 {
110 	return iova >> SECT_ORDER;
111 }
112 
113 static u32 lv2ent_offset(sysmmu_iova_t iova)
114 {
115 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
116 }
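/*
 * Example (illustrative): for IOVA 0x12345678, lv1ent_offset() returns 0x123
 * (bits 31:20), lv2ent_offset() returns 0x45 (bits 19:12) and the in-page
 * offset is 0x678 (bits 11:0).
 */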
117 
118 #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
119 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
120 
121 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
122 #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
123 
124 #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
125 #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
126 #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
127 #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
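/*
 * Worked example (illustrative, assuming a v5 controller, i.e.
 * PG_ENT_SHIFT == 4): a 1MiB section backed by physical address 0x123400000
 * is stored as mk_lv1ent_sect(0x123400000, prot) == 0x12340000 | LV1_PROT[prot] | 2,
 * and section_phys() recovers 0x123400000 by shifting the entry left by 4 bits
 * and masking with SECT_MASK.
 */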
128 
129 #define CTRL_ENABLE	0x5
130 #define CTRL_BLOCK	0x7
131 #define CTRL_DISABLE	0x0
132 
133 #define CFG_LRU		0x1
134 #define CFG_EAP		(1 << 2)
135 #define CFG_QOS(n)	((n & 0xF) << 7)
136 #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
137 #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
138 #define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
139 
140 /* common registers */
141 #define REG_MMU_CTRL		0x000
142 #define REG_MMU_CFG		0x004
143 #define REG_MMU_STATUS		0x008
144 #define REG_MMU_VERSION		0x034
145 
146 #define MMU_MAJ_VER(val)	((val) >> 7)
147 #define MMU_MIN_VER(val)	((val) & 0x7F)
148 #define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
149 
150 #define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
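/*
 * Example (illustrative): a controller reporting version 3.3 ends up with
 * data->version == MAKE_MMU_VER(3, 3) == (3 << 7) | 3 == 0x183, so
 * MMU_MAJ_VER() yields 3 and MMU_MIN_VER() yields 3.
 */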
151 
152 /* v1.x - v3.x registers */
153 #define REG_MMU_FLUSH		0x00C
154 #define REG_MMU_FLUSH_ENTRY	0x010
155 #define REG_PT_BASE_ADDR	0x014
156 #define REG_INT_STATUS		0x018
157 #define REG_INT_CLEAR		0x01C
158 
159 #define REG_PAGE_FAULT_ADDR	0x024
160 #define REG_AW_FAULT_ADDR	0x028
161 #define REG_AR_FAULT_ADDR	0x02C
162 #define REG_DEFAULT_SLAVE_ADDR	0x030
163 
164 /* v5.x registers */
165 #define REG_V5_PT_BASE_PFN	0x00C
166 #define REG_V5_MMU_FLUSH_ALL	0x010
167 #define REG_V5_MMU_FLUSH_ENTRY	0x014
168 #define REG_V5_MMU_FLUSH_RANGE	0x018
169 #define REG_V5_MMU_FLUSH_START	0x020
170 #define REG_V5_MMU_FLUSH_END	0x024
171 #define REG_V5_INT_STATUS	0x060
172 #define REG_V5_INT_CLEAR	0x064
173 #define REG_V5_FAULT_AR_VA	0x070
174 #define REG_V5_FAULT_AW_VA	0x080
175 
176 #define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)
177 
178 static struct device *dma_dev;
179 static struct kmem_cache *lv2table_kmem_cache;
180 static sysmmu_pte_t *zero_lv2_table;
181 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
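/*
 * ZERO_LV2LINK points every otherwise-unused lv1 entry at the shared, all-fault
 * zero_lv2_table instead of leaving it as a plain fault entry; this is part of
 * the System MMU v3.3 FLPD-cache workaround described further below.
 */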
182 
183 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
184 {
185 	return pgtable + lv1ent_offset(iova);
186 }
187 
188 static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
189 {
190 	return (sysmmu_pte_t *)phys_to_virt(
191 				lv2table_base(sent)) + lv2ent_offset(iova);
192 }
193 
194 /*
195  * IOMMU fault information register
196  */
197 struct sysmmu_fault_info {
198 	unsigned int bit;	/* bit number in STATUS register */
199 	unsigned short addr_reg; /* register to read VA fault address */
200 	const char *name;	/* human readable fault name */
201 	unsigned int type;	/* fault type for report_iommu_fault */
202 };
203 
204 static const struct sysmmu_fault_info sysmmu_faults[] = {
205 	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
206 	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
207 	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
208 	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
209 	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
210 	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
211 	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
212 	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
213 };
214 
215 static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
216 	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
217 	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
218 	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
219 	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
220 	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
221 	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
222 	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
223 	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
224 	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
225 	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
226 };
227 
228 /*
229  * This structure is attached to dev->iommu->priv of the master device
230  * on device add. It contains a list of the SYSMMU controllers defined by the
231  * device tree, which are bound to the given master device. It is usually
232  * referenced by the 'owner' pointer.
233  */
234 struct exynos_iommu_owner {
235 	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
236 	struct iommu_domain *domain;	/* domain this device is attached to */
237 	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
238 };
239 
240 /*
241  * This structure is an exynos-specific generalization of struct iommu_domain.
242  * It contains a list of the SYSMMU controllers from all master devices which
243  * have been attached to this domain, and the page tables of the IO address
244  * space defined by it. It is usually referenced by the 'domain' pointer.
245  */
246 struct exynos_iommu_domain {
247 	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
248 	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
249 	short *lv2entcnt;	/* free lv2 entry counter for each section */
250 	spinlock_t lock;	/* lock for modifying the list of clients */
251 	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
252 	struct iommu_domain domain; /* generic domain data structure */
253 };
254 
255 /*
256  * This structure holds all data of a single SYSMMU controller; this includes
257  * hw resources like registers and clocks, pointers and list nodes connecting
258  * it to all other structures, internal state and parameters read from the
259  * device tree. It is usually referenced by the 'data' pointer.
260  */
261 struct sysmmu_drvdata {
262 	struct device *sysmmu;		/* SYSMMU controller device */
263 	struct device *master;		/* master device (owner) */
264 	struct device_link *link;	/* runtime PM link to master */
265 	void __iomem *sfrbase;		/* our registers */
266 	struct clk *clk;		/* SYSMMU's clock */
267 	struct clk *aclk;		/* SYSMMU's aclk clock */
268 	struct clk *pclk;		/* SYSMMU's pclk clock */
269 	struct clk *clk_master;		/* master's device clock */
270 	spinlock_t lock;		/* lock for modifying state */
271 	bool active;			/* current status */
272 	struct exynos_iommu_domain *domain; /* domain we belong to */
273 	struct list_head domain_node;	/* node for domain clients list */
274 	struct list_head owner_node;	/* node for owner controllers list */
275 	phys_addr_t pgtable;		/* assigned page table structure */
276 	unsigned int version;		/* our version */
277 
278 	struct iommu_device iommu;	/* IOMMU core handle */
279 };
280 
281 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
282 {
283 	return container_of(dom, struct exynos_iommu_domain, domain);
284 }
285 
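/*
 * sysmmu_block()/sysmmu_unblock(): writing CTRL_BLOCK stalls translation so
 * that TLB maintenance registers can be touched safely; bit 0 of
 * REG_MMU_STATUS is polled to confirm that the blocked state was reached
 * (best-effort description, inferred from the code below).
 */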
286 static void sysmmu_unblock(struct sysmmu_drvdata *data)
287 {
288 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
289 }
290 
291 static bool sysmmu_block(struct sysmmu_drvdata *data)
292 {
293 	int i = 120;
294 
295 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
296 	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
297 		--i;
298 
299 	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
300 		sysmmu_unblock(data);
301 		return false;
302 	}
303 
304 	return true;
305 }
306 
307 static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
308 {
309 	if (MMU_MAJ_VER(data->version) < 5)
310 		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
311 	else
312 		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
313 }
314 
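/*
 * Per-entry TLB invalidation: v1.x - v3.x hardware takes one FLUSH_ENTRY write
 * per 4KiB page, while v5 accepts either a single FLUSH_ENTRY write or a
 * FLUSH_START/FLUSH_END pair followed by a FLUSH_RANGE trigger for multi-page
 * ranges.
 */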
315 static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
316 				sysmmu_iova_t iova, unsigned int num_inv)
317 {
318 	unsigned int i;
319 
320 	if (MMU_MAJ_VER(data->version) < 5) {
321 		for (i = 0; i < num_inv; i++) {
322 			writel((iova & SPAGE_MASK) | 1,
323 				     data->sfrbase + REG_MMU_FLUSH_ENTRY);
324 			iova += SPAGE_SIZE;
325 		}
326 	} else {
327 		if (num_inv == 1) {
328 			writel((iova & SPAGE_MASK) | 1,
329 				     data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
330 		} else {
331 			writel((iova & SPAGE_MASK),
332 				     data->sfrbase + REG_V5_MMU_FLUSH_START);
333 			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
334 				     data->sfrbase + REG_V5_MMU_FLUSH_END);
335 			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
336 		}
337 	}
338 }
339 
340 static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
341 {
342 	if (MMU_MAJ_VER(data->version) < 5)
343 		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
344 	else
345 		writel(pgd >> PAGE_SHIFT,
346 			     data->sfrbase + REG_V5_PT_BASE_PFN);
347 
348 	__sysmmu_tlb_invalidate(data);
349 }
350 
351 static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
352 {
353 	BUG_ON(clk_prepare_enable(data->clk_master));
354 	BUG_ON(clk_prepare_enable(data->clk));
355 	BUG_ON(clk_prepare_enable(data->pclk));
356 	BUG_ON(clk_prepare_enable(data->aclk));
357 }
358 
359 static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
360 {
361 	clk_disable_unprepare(data->aclk);
362 	clk_disable_unprepare(data->pclk);
363 	clk_disable_unprepare(data->clk);
364 	clk_disable_unprepare(data->clk_master);
365 }
366 
367 static void __sysmmu_get_version(struct sysmmu_drvdata *data)
368 {
369 	u32 ver;
370 
371 	__sysmmu_enable_clocks(data);
372 
373 	ver = readl(data->sfrbase + REG_MMU_VERSION);
374 
375 	/* controllers on some SoCs don't report proper version */
376 	if (ver == 0x80000001u)
377 		data->version = MAKE_MMU_VER(1, 0);
378 	else
379 		data->version = MMU_RAW_VER(ver);
380 
381 	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
382 		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
383 
384 	__sysmmu_disable_clocks(data);
385 }
386 
387 static void show_fault_information(struct sysmmu_drvdata *data,
388 				   const struct sysmmu_fault_info *finfo,
389 				   sysmmu_iova_t fault_addr)
390 {
391 	sysmmu_pte_t *ent;
392 
393 	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
394 		dev_name(data->master), finfo->name, fault_addr);
395 	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
396 	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
397 	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
398 	if (lv1ent_page(ent)) {
399 		ent = page_entry(ent, fault_addr);
400 		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
401 	}
402 }
403 
404 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
405 {
406 	/* SYSMMU is in blocked state when an interrupt has occurred. */
407 	struct sysmmu_drvdata *data = dev_id;
408 	const struct sysmmu_fault_info *finfo;
409 	unsigned int i, n, itype;
410 	sysmmu_iova_t fault_addr;
411 	unsigned short reg_status, reg_clear;
412 	int ret = -ENOSYS;
413 
414 	WARN_ON(!data->active);
415 
416 	if (MMU_MAJ_VER(data->version) < 5) {
417 		reg_status = REG_INT_STATUS;
418 		reg_clear = REG_INT_CLEAR;
419 		finfo = sysmmu_faults;
420 		n = ARRAY_SIZE(sysmmu_faults);
421 	} else {
422 		reg_status = REG_V5_INT_STATUS;
423 		reg_clear = REG_V5_INT_CLEAR;
424 		finfo = sysmmu_v5_faults;
425 		n = ARRAY_SIZE(sysmmu_v5_faults);
426 	}
427 
428 	spin_lock(&data->lock);
429 
430 	clk_enable(data->clk_master);
431 
432 	itype = __ffs(readl(data->sfrbase + reg_status));
433 	for (i = 0; i < n; i++, finfo++)
434 		if (finfo->bit == itype)
435 			break;
436 	/* unknown/unsupported fault */
437 	BUG_ON(i == n);
438 
439 	/* print debug message */
440 	fault_addr = readl(data->sfrbase + finfo->addr_reg);
441 	show_fault_information(data, finfo, fault_addr);
442 
443 	if (data->domain)
444 		ret = report_iommu_fault(&data->domain->domain,
445 					data->master, fault_addr, finfo->type);
446 	/* fault is not recovered by fault handler */
447 	BUG_ON(ret != 0);
448 
449 	writel(1 << itype, data->sfrbase + reg_clear);
450 
451 	sysmmu_unblock(data);
452 
453 	clk_disable(data->clk_master);
454 
455 	spin_unlock(&data->lock);
456 
457 	return IRQ_HANDLED;
458 }
459 
460 static void __sysmmu_disable(struct sysmmu_drvdata *data)
461 {
462 	unsigned long flags;
463 
464 	clk_enable(data->clk_master);
465 
466 	spin_lock_irqsave(&data->lock, flags);
467 	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
468 	writel(0, data->sfrbase + REG_MMU_CFG);
469 	data->active = false;
470 	spin_unlock_irqrestore(&data->lock, flags);
471 
472 	__sysmmu_disable_clocks(data);
473 }
474 
475 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
476 {
477 	unsigned int cfg;
478 
479 	if (data->version <= MAKE_MMU_VER(3, 1))
480 		cfg = CFG_LRU | CFG_QOS(15);
481 	else if (data->version <= MAKE_MMU_VER(3, 2))
482 		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
483 	else
484 		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
485 
486 	cfg |= CFG_EAP; /* enable access protection bits check */
487 
488 	writel(cfg, data->sfrbase + REG_MMU_CFG);
489 }
490 
491 static void __sysmmu_enable(struct sysmmu_drvdata *data)
492 {
493 	unsigned long flags;
494 
495 	__sysmmu_enable_clocks(data);
496 
497 	spin_lock_irqsave(&data->lock, flags);
498 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
499 	__sysmmu_init_config(data);
500 	__sysmmu_set_ptbase(data, data->pgtable);
501 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
502 	data->active = true;
503 	spin_unlock_irqrestore(&data->lock, flags);
504 
505 	/*
506 	 * The SYSMMU driver keeps the master's clock enabled only for a short
507 	 * time, while accessing the registers. For performing address
508 	 * translation during DMA transactions it relies on the client
509 	 * driver to enable it.
510 	 */
511 	clk_disable(data->clk_master);
512 }
513 
514 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
515 					    sysmmu_iova_t iova)
516 {
517 	unsigned long flags;
518 
519 	spin_lock_irqsave(&data->lock, flags);
520 	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
521 		clk_enable(data->clk_master);
522 		if (sysmmu_block(data)) {
523 			if (data->version >= MAKE_MMU_VER(5, 0))
524 				__sysmmu_tlb_invalidate(data);
525 			else
526 				__sysmmu_tlb_invalidate_entry(data, iova, 1);
527 			sysmmu_unblock(data);
528 		}
529 		clk_disable(data->clk_master);
530 	}
531 	spin_unlock_irqrestore(&data->lock, flags);
532 }
533 
534 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
535 					sysmmu_iova_t iova, size_t size)
536 {
537 	unsigned long flags;
538 
539 	spin_lock_irqsave(&data->lock, flags);
540 	if (data->active) {
541 		unsigned int num_inv = 1;
542 
543 		clk_enable(data->clk_master);
544 
545 		/*
546 		 * L2TLB invalidations required:
547 		 * 4KB page: 1 invalidation
548 		 * 64KB page: 16 invalidations
549 		 * 1MB page: 64 invalidations
550 		 * because it is a set-associative TLB
551 		 * with 8 ways and 64 sets.
552 		 * A 1MB page can be cached in any of the sets.
553 		 * A 64KB page can be in one of 16 consecutive sets.
554 		 */
555 		if (MMU_MAJ_VER(data->version) == 2)
556 			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
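		/* e.g. unmapping a 64KB large page on v2 gives 64KB / 4KB = 16 invalidations */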
557 
558 		if (sysmmu_block(data)) {
559 			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
560 			sysmmu_unblock(data);
561 		}
562 		clk_disable(data->clk_master);
563 	}
564 	spin_unlock_irqrestore(&data->lock, flags);
565 }
566 
567 static const struct iommu_ops exynos_iommu_ops;
568 
569 static int exynos_sysmmu_probe(struct platform_device *pdev)
570 {
571 	int irq, ret;
572 	struct device *dev = &pdev->dev;
573 	struct sysmmu_drvdata *data;
574 	struct resource *res;
575 
576 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
577 	if (!data)
578 		return -ENOMEM;
579 
580 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
581 	data->sfrbase = devm_ioremap_resource(dev, res);
582 	if (IS_ERR(data->sfrbase))
583 		return PTR_ERR(data->sfrbase);
584 
585 	irq = platform_get_irq(pdev, 0);
586 	if (irq <= 0)
587 		return irq;
588 
589 	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
590 				dev_name(dev), data);
591 	if (ret) {
592 		dev_err(dev, "Unable to register handler of irq %d\n", irq);
593 		return ret;
594 	}
595 
596 	data->clk = devm_clk_get(dev, "sysmmu");
597 	if (PTR_ERR(data->clk) == -ENOENT)
598 		data->clk = NULL;
599 	else if (IS_ERR(data->clk))
600 		return PTR_ERR(data->clk);
601 
602 	data->aclk = devm_clk_get(dev, "aclk");
603 	if (PTR_ERR(data->aclk) == -ENOENT)
604 		data->aclk = NULL;
605 	else if (IS_ERR(data->aclk))
606 		return PTR_ERR(data->aclk);
607 
608 	data->pclk = devm_clk_get(dev, "pclk");
609 	if (PTR_ERR(data->pclk) == -ENOENT)
610 		data->pclk = NULL;
611 	else if (IS_ERR(data->pclk))
612 		return PTR_ERR(data->pclk);
613 
614 	if (!data->clk && (!data->aclk || !data->pclk)) {
615 		dev_err(dev, "Failed to get device clock(s)!\n");
616 		return -ENOSYS;
617 	}
618 
619 	data->clk_master = devm_clk_get(dev, "master");
620 	if (PTR_ERR(data->clk_master) == -ENOENT)
621 		data->clk_master = NULL;
622 	else if (IS_ERR(data->clk_master))
623 		return PTR_ERR(data->clk_master);
624 
625 	data->sysmmu = dev;
626 	spin_lock_init(&data->lock);
627 
628 	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
629 				     dev_name(data->sysmmu));
630 	if (ret)
631 		return ret;
632 
633 	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
634 	if (ret)
635 		return ret;
636 
637 	platform_set_drvdata(pdev, data);
638 
639 	__sysmmu_get_version(data);
640 	if (PG_ENT_SHIFT < 0) {
641 		if (MMU_MAJ_VER(data->version) < 5) {
642 			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
643 			LV1_PROT = SYSMMU_LV1_PROT;
644 			LV2_PROT = SYSMMU_LV2_PROT;
645 		} else {
646 			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
647 			LV1_PROT = SYSMMU_V5_LV1_PROT;
648 			LV2_PROT = SYSMMU_V5_LV2_PROT;
649 		}
650 	}
651 
652 	/*
653 	 * use the first registered sysmmu device for performing
654 	 * dma mapping operations on iommu page tables (cpu cache flush)
655 	 */
656 	if (!dma_dev)
657 		dma_dev = &pdev->dev;
658 
659 	pm_runtime_enable(dev);
660 
661 	return 0;
662 }
663 
664 static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
665 {
666 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
667 	struct device *master = data->master;
668 
669 	if (master) {
670 		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
671 
672 		mutex_lock(&owner->rpm_lock);
673 		if (data->domain) {
674 			dev_dbg(data->sysmmu, "saving state\n");
675 			__sysmmu_disable(data);
676 		}
677 		mutex_unlock(&owner->rpm_lock);
678 	}
679 	return 0;
680 }
681 
682 static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
683 {
684 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
685 	struct device *master = data->master;
686 
687 	if (master) {
688 		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
689 
690 		mutex_lock(&owner->rpm_lock);
691 		if (data->domain) {
692 			dev_dbg(data->sysmmu, "restoring state\n");
693 			__sysmmu_enable(data);
694 		}
695 		mutex_unlock(&owner->rpm_lock);
696 	}
697 	return 0;
698 }
699 
700 static const struct dev_pm_ops sysmmu_pm_ops = {
701 	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
702 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
703 				pm_runtime_force_resume)
704 };
705 
706 static const struct of_device_id sysmmu_of_match[] = {
707 	{ .compatible	= "samsung,exynos-sysmmu", },
708 	{ },
709 };
710 
711 static struct platform_driver exynos_sysmmu_driver __refdata = {
712 	.probe	= exynos_sysmmu_probe,
713 	.driver	= {
714 		.name		= "exynos-sysmmu",
715 		.of_match_table	= sysmmu_of_match,
716 		.pm		= &sysmmu_pm_ops,
717 		.suppress_bind_attrs = true,
718 	}
719 };
720 
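/*
 * Page tables are mapped DMA_TO_DEVICE, so every PTE update is bracketed by
 * dma_sync_single_for_cpu()/dma_sync_single_for_device() to keep the CPU
 * caches coherent with the memory the SYSMMU page table walker reads.
 */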
721 static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
722 {
723 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
724 				DMA_TO_DEVICE);
725 	*ent = cpu_to_le32(val);
726 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
727 				   DMA_TO_DEVICE);
728 }
729 
730 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
731 {
732 	struct exynos_iommu_domain *domain;
733 	dma_addr_t handle;
734 	int i;
735 
736 	/* Check if correct PTE offsets are initialized */
737 	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
738 
739 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
740 	if (!domain)
741 		return NULL;
742 
743 	if (type == IOMMU_DOMAIN_DMA) {
744 		if (iommu_get_dma_cookie(&domain->domain) != 0)
745 			goto err_pgtable;
746 	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
747 		goto err_pgtable;
748 	}
749 
750 	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
751 	if (!domain->pgtable)
752 		goto err_dma_cookie;
753 
754 	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
755 	if (!domain->lv2entcnt)
756 		goto err_counter;
757 
758 	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
759 	for (i = 0; i < NUM_LV1ENTRIES; i++)
760 		domain->pgtable[i] = ZERO_LV2LINK;
761 
762 	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
763 				DMA_TO_DEVICE);
764 	/* For mapping page table entries we rely on dma == phys */
765 	BUG_ON(handle != virt_to_phys(domain->pgtable));
766 	if (dma_mapping_error(dma_dev, handle))
767 		goto err_lv2ent;
768 
769 	spin_lock_init(&domain->lock);
770 	spin_lock_init(&domain->pgtablelock);
771 	INIT_LIST_HEAD(&domain->clients);
772 
773 	domain->domain.geometry.aperture_start = 0;
774 	domain->domain.geometry.aperture_end   = ~0UL;
775 	domain->domain.geometry.force_aperture = true;
776 
777 	return &domain->domain;
778 
779 err_lv2ent:
780 	free_pages((unsigned long)domain->lv2entcnt, 1);
781 err_counter:
782 	free_pages((unsigned long)domain->pgtable, 2);
783 err_dma_cookie:
784 	if (type == IOMMU_DOMAIN_DMA)
785 		iommu_put_dma_cookie(&domain->domain);
786 err_pgtable:
787 	kfree(domain);
788 	return NULL;
789 }
790 
791 static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
792 {
793 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
794 	struct sysmmu_drvdata *data, *next;
795 	unsigned long flags;
796 	int i;
797 
798 	WARN_ON(!list_empty(&domain->clients));
799 
800 	spin_lock_irqsave(&domain->lock, flags);
801 
802 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
803 		spin_lock(&data->lock);
804 		__sysmmu_disable(data);
805 		data->pgtable = 0;
806 		data->domain = NULL;
807 		list_del_init(&data->domain_node);
808 		spin_unlock(&data->lock);
809 	}
810 
811 	spin_unlock_irqrestore(&domain->lock, flags);
812 
813 	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
814 		iommu_put_dma_cookie(iommu_domain);
815 
816 	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
817 			 DMA_TO_DEVICE);
818 
819 	for (i = 0; i < NUM_LV1ENTRIES; i++)
820 		if (lv1ent_page(domain->pgtable + i)) {
821 			phys_addr_t base = lv2table_base(domain->pgtable + i);
822 
823 			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
824 					 DMA_TO_DEVICE);
825 			kmem_cache_free(lv2table_kmem_cache,
826 					phys_to_virt(base));
827 		}
828 
829 	free_pages((unsigned long)domain->pgtable, 2);
830 	free_pages((unsigned long)domain->lv2entcnt, 1);
831 	kfree(domain);
832 }
833 
834 static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
835 				    struct device *dev)
836 {
837 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
838 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
839 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
840 	struct sysmmu_drvdata *data, *next;
841 	unsigned long flags;
842 
843 	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
844 		return;
845 
846 	mutex_lock(&owner->rpm_lock);
847 
848 	list_for_each_entry(data, &owner->controllers, owner_node) {
849 		pm_runtime_get_noresume(data->sysmmu);
850 		if (pm_runtime_active(data->sysmmu))
851 			__sysmmu_disable(data);
852 		pm_runtime_put(data->sysmmu);
853 	}
854 
855 	spin_lock_irqsave(&domain->lock, flags);
856 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
857 		spin_lock(&data->lock);
858 		data->pgtable = 0;
859 		data->domain = NULL;
860 		list_del_init(&data->domain_node);
861 		spin_unlock(&data->lock);
862 	}
863 	owner->domain = NULL;
864 	spin_unlock_irqrestore(&domain->lock, flags);
865 
866 	mutex_unlock(&owner->rpm_lock);
867 
868 	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
869 		&pagetable);
870 }
871 
872 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
873 				   struct device *dev)
874 {
875 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
876 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
877 	struct sysmmu_drvdata *data;
878 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
879 	unsigned long flags;
880 
881 	if (!has_sysmmu(dev))
882 		return -ENODEV;
883 
884 	if (owner->domain)
885 		exynos_iommu_detach_device(owner->domain, dev);
886 
887 	mutex_lock(&owner->rpm_lock);
888 
889 	spin_lock_irqsave(&domain->lock, flags);
890 	list_for_each_entry(data, &owner->controllers, owner_node) {
891 		spin_lock(&data->lock);
892 		data->pgtable = pagetable;
893 		data->domain = domain;
894 		list_add_tail(&data->domain_node, &domain->clients);
895 		spin_unlock(&data->lock);
896 	}
897 	owner->domain = iommu_domain;
898 	spin_unlock_irqrestore(&domain->lock, flags);
899 
900 	list_for_each_entry(data, &owner->controllers, owner_node) {
901 		pm_runtime_get_noresume(data->sysmmu);
902 		if (pm_runtime_active(data->sysmmu))
903 			__sysmmu_enable(data);
904 		pm_runtime_put(data->sysmmu);
905 	}
906 
907 	mutex_unlock(&owner->rpm_lock);
908 
909 	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
910 		&pagetable);
911 
912 	return 0;
913 }
914 
915 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
916 		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
917 {
918 	if (lv1ent_section(sent)) {
919 		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
920 		return ERR_PTR(-EADDRINUSE);
921 	}
922 
923 	if (lv1ent_fault(sent)) {
924 		dma_addr_t handle;
925 		sysmmu_pte_t *pent;
926 		bool need_flush_flpd_cache = lv1ent_zero(sent);
927 
928 		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
929 		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
930 		if (!pent)
931 			return ERR_PTR(-ENOMEM);
932 
933 		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
934 		kmemleak_ignore(pent);
935 		*pgcounter = NUM_LV2ENTRIES;
936 		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
937 					DMA_TO_DEVICE);
938 		if (dma_mapping_error(dma_dev, handle)) {
939 			kmem_cache_free(lv2table_kmem_cache, pent);
940 			return ERR_PTR(-EADDRINUSE);
941 		}
942 
943 		/*
944 		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
945 		 * FLPD cache may cache the address of zero_l2_table. This
946 		 * function replaces the zero_l2_table with new L2 page table
947 		 * to write valid mappings.
948 		 * Accessing the valid area may cause page fault since FLPD
949 		 * cache may still cache zero_l2_table for the valid area
950 		 * instead of new L2 page table that has the mapping
951 		 * information of the valid area.
952 		 * Thus any replacement of zero_l2_table with other valid L2
953 		 * page table must involve FLPD cache invalidation for System
954 		 * MMU v3.3.
955 		 * FLPD cache invalidation is performed with TLB invalidation
956 		 * by VPN without blocking. It is safe to invalidate TLB without
957 		 * blocking because the target address of TLB invalidation is
958 		 * not currently mapped.
959 		 */
960 		if (need_flush_flpd_cache) {
961 			struct sysmmu_drvdata *data;
962 
963 			spin_lock(&domain->lock);
964 			list_for_each_entry(data, &domain->clients, domain_node)
965 				sysmmu_tlb_invalidate_flpdcache(data, iova);
966 			spin_unlock(&domain->lock);
967 		}
968 	}
969 
970 	return page_entry(sent, iova);
971 }
972 
973 static int lv1set_section(struct exynos_iommu_domain *domain,
974 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
975 			  phys_addr_t paddr, int prot, short *pgcnt)
976 {
977 	if (lv1ent_section(sent)) {
978 		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
979 			iova);
980 		return -EADDRINUSE;
981 	}
982 
983 	if (lv1ent_page(sent)) {
984 		if (*pgcnt != NUM_LV2ENTRIES) {
985 			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
986 				iova);
987 			return -EADDRINUSE;
988 		}
989 
990 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
991 		*pgcnt = 0;
992 	}
993 
994 	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
995 
996 	spin_lock(&domain->lock);
997 	if (lv1ent_page_zero(sent)) {
998 		struct sysmmu_drvdata *data;
999 		/*
1000 		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
1001 		 * entry by speculative prefetch of SLPD which has no mapping.
1002 		 */
1003 		list_for_each_entry(data, &domain->clients, domain_node)
1004 			sysmmu_tlb_invalidate_flpdcache(data, iova);
1005 	}
1006 	spin_unlock(&domain->lock);
1007 
1008 	return 0;
1009 }
1010 
1011 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
1012 		       int prot, short *pgcnt)
1013 {
1014 	if (size == SPAGE_SIZE) {
1015 		if (WARN_ON(!lv2ent_fault(pent)))
1016 			return -EADDRINUSE;
1017 
1018 		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
1019 		*pgcnt -= 1;
1020 	} else { /* size == LPAGE_SIZE */
1021 		int i;
1022 		dma_addr_t pent_base = virt_to_phys(pent);
1023 
1024 		dma_sync_single_for_cpu(dma_dev, pent_base,
1025 					sizeof(*pent) * SPAGES_PER_LPAGE,
1026 					DMA_TO_DEVICE);
1027 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
1028 			if (WARN_ON(!lv2ent_fault(pent))) {
1029 				if (i > 0)
1030 					memset(pent - i, 0, sizeof(*pent) * i);
1031 				return -EADDRINUSE;
1032 			}
1033 
1034 			*pent = mk_lv2ent_lpage(paddr, prot);
1035 		}
1036 		dma_sync_single_for_device(dma_dev, pent_base,
1037 					   sizeof(*pent) * SPAGES_PER_LPAGE,
1038 					   DMA_TO_DEVICE);
1039 		*pgcnt -= SPAGES_PER_LPAGE;
1040 	}
1041 
1042 	return 0;
1043 }
1044 
1045 /*
1046  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1047  *
1048  * System MMU v3.x has advanced logic to improve address translation
1049  * performance with caching more page table entries by a page table walk.
1050  * However, the logic has a bug: while caching faulty page table entries,
1051  * System MMU reports a page fault if the cached fault entry is hit, even though
1052  * the fault entry has been updated to a valid entry after it was cached.
1053  * To prevent caching faulty page table entries which may later be updated to
1054  * valid entries, the virtual memory manager must take care of the workaround
1055  * described below.
1056  *
1057  * Any two consecutive I/O virtual address regions must have a hole of 128KiB
1058  * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
1059  *
1060  * Precisely, any start address of I/O virtual region must be aligned with
1061  * the following sizes for System MMU v3.1 and v3.2.
1062  * System MMU v3.1: 128KiB
1063  * System MMU v3.2: 256KiB
1064  *
1065  * Because System MMU v3.3 caches page table entries more aggressively, it needs
1066  * more workarounds.
1067  * - Any two consecutive I/O virtual regions must have a hole of size larger
1068  *   than or equal to 128KiB.
1069  * - Start address of an I/O virtual region must be aligned by 128KiB.
1070  */
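/*
 * For example (illustrative only), an IOVA allocator targeting System MMU v3.3
 * could round every region start up to a 128KiB boundary and keep at least
 * 128KiB of unmapped space between consecutive regions to satisfy the rules
 * above.
 */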
1071 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1072 			    unsigned long l_iova, phys_addr_t paddr, size_t size,
1073 			    int prot, gfp_t gfp)
1074 {
1075 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1076 	sysmmu_pte_t *entry;
1077 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1078 	unsigned long flags;
1079 	int ret = -ENOMEM;
1080 
1081 	BUG_ON(domain->pgtable == NULL);
1082 	prot &= SYSMMU_SUPPORTED_PROT_BITS;
1083 
1084 	spin_lock_irqsave(&domain->pgtablelock, flags);
1085 
1086 	entry = section_entry(domain->pgtable, iova);
1087 
1088 	if (size == SECT_SIZE) {
1089 		ret = lv1set_section(domain, entry, iova, paddr, prot,
1090 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
1091 	} else {
1092 		sysmmu_pte_t *pent;
1093 
1094 		pent = alloc_lv2entry(domain, entry, iova,
1095 				      &domain->lv2entcnt[lv1ent_offset(iova)]);
1096 
1097 		if (IS_ERR(pent))
1098 			ret = PTR_ERR(pent);
1099 		else
1100 			ret = lv2set_page(pent, paddr, size, prot,
1101 				       &domain->lv2entcnt[lv1ent_offset(iova)]);
1102 	}
1103 
1104 	if (ret)
1105 		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
1106 			__func__, ret, size, iova);
1107 
1108 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1109 
1110 	return ret;
1111 }
1112 
1113 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1114 					      sysmmu_iova_t iova, size_t size)
1115 {
1116 	struct sysmmu_drvdata *data;
1117 	unsigned long flags;
1118 
1119 	spin_lock_irqsave(&domain->lock, flags);
1120 
1121 	list_for_each_entry(data, &domain->clients, domain_node)
1122 		sysmmu_tlb_invalidate_entry(data, iova, size);
1123 
1124 	spin_unlock_irqrestore(&domain->lock, flags);
1125 }
1126 
1127 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1128 				 unsigned long l_iova, size_t size,
1129 				 struct iommu_iotlb_gather *gather)
1130 {
1131 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1132 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1133 	sysmmu_pte_t *ent;
1134 	size_t err_pgsize;
1135 	unsigned long flags;
1136 
1137 	BUG_ON(domain->pgtable == NULL);
1138 
1139 	spin_lock_irqsave(&domain->pgtablelock, flags);
1140 
1141 	ent = section_entry(domain->pgtable, iova);
1142 
1143 	if (lv1ent_section(ent)) {
1144 		if (WARN_ON(size < SECT_SIZE)) {
1145 			err_pgsize = SECT_SIZE;
1146 			goto err;
1147 		}
1148 
1149 		/* workaround for h/w bug in System MMU v3.3 */
1150 		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
1151 		size = SECT_SIZE;
1152 		goto done;
1153 	}
1154 
1155 	if (unlikely(lv1ent_fault(ent))) {
1156 		if (size > SECT_SIZE)
1157 			size = SECT_SIZE;
1158 		goto done;
1159 	}
1160 
1161 	/* lv1ent_page(ent) == true here */
1162 
1163 	ent = page_entry(ent, iova);
1164 
1165 	if (unlikely(lv2ent_fault(ent))) {
1166 		size = SPAGE_SIZE;
1167 		goto done;
1168 	}
1169 
1170 	if (lv2ent_small(ent)) {
1171 		exynos_iommu_set_pte(ent, 0);
1172 		size = SPAGE_SIZE;
1173 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1174 		goto done;
1175 	}
1176 
1177 	/* lv2ent_large(ent) == true here */
1178 	if (WARN_ON(size < LPAGE_SIZE)) {
1179 		err_pgsize = LPAGE_SIZE;
1180 		goto err;
1181 	}
1182 
1183 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
1184 				sizeof(*ent) * SPAGES_PER_LPAGE,
1185 				DMA_TO_DEVICE);
1186 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1187 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
1188 				   sizeof(*ent) * SPAGES_PER_LPAGE,
1189 				   DMA_TO_DEVICE);
1190 	size = LPAGE_SIZE;
1191 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1192 done:
1193 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1194 
1195 	exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1196 
1197 	return size;
1198 err:
1199 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1200 
1201 	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
1202 		__func__, size, iova, err_pgsize);
1203 
1204 	return 0;
1205 }
1206 
1207 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1208 					  dma_addr_t iova)
1209 {
1210 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1211 	sysmmu_pte_t *entry;
1212 	unsigned long flags;
1213 	phys_addr_t phys = 0;
1214 
1215 	spin_lock_irqsave(&domain->pgtablelock, flags);
1216 
1217 	entry = section_entry(domain->pgtable, iova);
1218 
1219 	if (lv1ent_section(entry)) {
1220 		phys = section_phys(entry) + section_offs(iova);
1221 	} else if (lv1ent_page(entry)) {
1222 		entry = page_entry(entry, iova);
1223 
1224 		if (lv2ent_large(entry))
1225 			phys = lpage_phys(entry) + lpage_offs(iova);
1226 		else if (lv2ent_small(entry))
1227 			phys = spage_phys(entry) + spage_offs(iova);
1228 	}
1229 
1230 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
1231 
1232 	return phys;
1233 }
1234 
1235 static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
1236 {
1237 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1238 	struct sysmmu_drvdata *data;
1239 
1240 	if (!has_sysmmu(dev))
1241 		return ERR_PTR(-ENODEV);
1242 
1243 	list_for_each_entry(data, &owner->controllers, owner_node) {
1244 		/*
1245 		 * SYSMMU will be runtime activated via device link
1246 		 * (dependency) to its master device, so there are no
1247 		 * direct calls to pm_runtime_get/put in this driver.
1248 		 */
1249 		data->link = device_link_add(dev, data->sysmmu,
1250 					     DL_FLAG_STATELESS |
1251 					     DL_FLAG_PM_RUNTIME);
1252 	}
1253 
1254 	/* There is always at least one entry, see exynos_iommu_of_xlate() */
1255 	data = list_first_entry(&owner->controllers,
1256 				struct sysmmu_drvdata, owner_node);
1257 
1258 	return &data->iommu;
1259 }
1260 
1261 static void exynos_iommu_release_device(struct device *dev)
1262 {
1263 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1264 	struct sysmmu_drvdata *data;
1265 
1266 	if (!has_sysmmu(dev))
1267 		return;
1268 
1269 	if (owner->domain) {
1270 		struct iommu_group *group = iommu_group_get(dev);
1271 
1272 		if (group) {
1273 			WARN_ON(owner->domain !=
1274 				iommu_group_default_domain(group));
1275 			exynos_iommu_detach_device(owner->domain, dev);
1276 			iommu_group_put(group);
1277 		}
1278 	}
1279 
1280 	list_for_each_entry(data, &owner->controllers, owner_node)
1281 		device_link_del(data->link);
1282 }
1283 
1284 static int exynos_iommu_of_xlate(struct device *dev,
1285 				 struct of_phandle_args *spec)
1286 {
1287 	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1288 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
1289 	struct sysmmu_drvdata *data, *entry;
1290 
1291 	if (!sysmmu)
1292 		return -ENODEV;
1293 
1294 	data = platform_get_drvdata(sysmmu);
1295 	if (!data) {
1296 		put_device(&sysmmu->dev);
1297 		return -ENODEV;
1298 	}
1299 
1300 	if (!owner) {
1301 		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1302 		if (!owner) {
1303 			put_device(&sysmmu->dev);
1304 			return -ENOMEM;
1305 		}
1306 
1307 		INIT_LIST_HEAD(&owner->controllers);
1308 		mutex_init(&owner->rpm_lock);
1309 		dev_iommu_priv_set(dev, owner);
1310 	}
1311 
1312 	list_for_each_entry(entry, &owner->controllers, owner_node)
1313 		if (entry == data)
1314 			return 0;
1315 
1316 	list_add_tail(&data->owner_node, &owner->controllers);
1317 	data->master = dev;
1318 
1319 	return 0;
1320 }
1321 
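/*
 * Note: the supported page sizes in pgsize_bitmap below follow directly from
 * the page table format: 4KiB small pages, 64KiB large pages and 1MiB sections.
 */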
1322 static const struct iommu_ops exynos_iommu_ops = {
1323 	.domain_alloc = exynos_iommu_domain_alloc,
1324 	.domain_free = exynos_iommu_domain_free,
1325 	.attach_dev = exynos_iommu_attach_device,
1326 	.detach_dev = exynos_iommu_detach_device,
1327 	.map = exynos_iommu_map,
1328 	.unmap = exynos_iommu_unmap,
1329 	.iova_to_phys = exynos_iommu_iova_to_phys,
1330 	.device_group = generic_device_group,
1331 	.probe_device = exynos_iommu_probe_device,
1332 	.release_device = exynos_iommu_release_device,
1333 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1334 	.of_xlate = exynos_iommu_of_xlate,
1335 };
1336 
1337 static int __init exynos_iommu_init(void)
1338 {
1339 	struct device_node *np;
1340 	int ret;
1341 
1342 	np = of_find_matching_node(NULL, sysmmu_of_match);
1343 	if (!np)
1344 		return 0;
1345 
1346 	of_node_put(np);
1347 
1348 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1349 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1350 	if (!lv2table_kmem_cache) {
1351 		pr_err("%s: Failed to create kmem cache\n", __func__);
1352 		return -ENOMEM;
1353 	}
1354 
1355 	ret = platform_driver_register(&exynos_sysmmu_driver);
1356 	if (ret) {
1357 		pr_err("%s: Failed to register driver\n", __func__);
1358 		goto err_reg_driver;
1359 	}
1360 
1361 	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1362 	if (zero_lv2_table == NULL) {
1363 		pr_err("%s: Failed to allocate zero level2 page table\n",
1364 			__func__);
1365 		ret = -ENOMEM;
1366 		goto err_zero_lv2;
1367 	}
1368 
1369 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1370 	if (ret) {
1371 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1372 								__func__);
1373 		goto err_set_iommu;
1374 	}
1375 
1376 	return 0;
1377 err_set_iommu:
1378 	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1379 err_zero_lv2:
1380 	platform_driver_unregister(&exynos_sysmmu_driver);
1381 err_reg_driver:
1382 	kmem_cache_destroy(lv2table_kmem_cache);
1383 	return ret;
1384 }
1385 core_initcall(exynos_iommu_init);
1386