xref: /openbmc/linux/drivers/iommu/mtk_iommu.c (revision 29c37341)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2016 MediaTek Inc.
4  * Author: Yong Wu <yong.wu@mediatek.com>
5  */
6 #include <linux/memblock.h>
7 #include <linux/bug.h>
8 #include <linux/clk.h>
9 #include <linux/component.h>
10 #include <linux/device.h>
11 #include <linux/dma-iommu.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/iommu.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/of_address.h>
19 #include <linux/of_iommu.h>
20 #include <linux/of_irq.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <asm/barrier.h>
26 #include <soc/mediatek/smi.h>
27 
28 #include "mtk_iommu.h"
29 
30 #define REG_MMU_PT_BASE_ADDR			0x000
31 #define MMU_PT_ADDR_MASK			GENMASK(31, 7)
32 
33 #define REG_MMU_INVALIDATE			0x020
34 #define F_ALL_INVLD				0x2
35 #define F_MMU_INV_RANGE				0x1
36 
37 #define REG_MMU_INVLD_START_A			0x024
38 #define REG_MMU_INVLD_END_A			0x028
39 
40 #define REG_MMU_INV_SEL_GEN2			0x02c
41 #define REG_MMU_INV_SEL_GEN1			0x038
42 #define F_INVLD_EN0				BIT(0)
43 #define F_INVLD_EN1				BIT(1)
44 
45 #define REG_MMU_MISC_CTRL			0x048
46 #define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
47 #define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))
48 
49 #define REG_MMU_DCM_DIS				0x050
50 #define REG_MMU_WR_LEN_CTRL			0x054
51 #define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))
52 
53 #define REG_MMU_CTRL_REG			0x110
54 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
55 #define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
56 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)
57 
58 #define REG_MMU_IVRP_PADDR			0x114
59 
60 #define REG_MMU_VLD_PA_RNG			0x118
61 #define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
62 
63 #define REG_MMU_INT_CONTROL0			0x120
64 #define F_L2_MULIT_HIT_EN			BIT(0)
65 #define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
66 #define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
67 #define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
68 #define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
69 #define F_MISS_FIFO_ERR_INT_EN			BIT(6)
70 #define F_INT_CLR_BIT				BIT(12)
71 
72 #define REG_MMU_INT_MAIN_CONTROL		0x124
73 						/* mmu0 | mmu1 */
74 #define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
75 #define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
76 #define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
77 #define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
78 #define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
79 #define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
80 #define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))
81 
82 #define REG_MMU_CPE_DONE			0x12C
83 
84 #define REG_MMU_FAULT_ST1			0x134
85 #define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
86 #define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)
87 
88 #define REG_MMU0_FAULT_VA			0x13c
89 #define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
90 #define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)
91 
92 #define REG_MMU0_INVLD_PA			0x140
93 #define REG_MMU1_FAULT_VA			0x144
94 #define REG_MMU1_INVLD_PA			0x148
95 #define REG_MMU0_INT_ID				0x150
96 #define REG_MMU1_INT_ID				0x154
97 #define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
98 #define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
99 #define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
100 #define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
101 
102 #define MTK_PROTECT_PA_ALIGN			256
103 
104 /*
105  * Get the local arbiter (larb) ID and the port ID within that larb from
106  * an mtk_m4u_id that was encoded by MTK_M4U_ID.
107  */
108 #define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
109 #define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
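/*
 * Illustrative decode example (hypothetical master ID): an id of 0x43
 * (0b010_00011) gives MTK_M4U_TO_LARB(0x43) = 2 and MTK_M4U_TO_PORT(0x43) = 3,
 * i.e. port 3 of local arbiter (larb) 2.
 */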
110 
111 #define HAS_4GB_MODE			BIT(0)
112 /* HW will use the EMI clock if there is no "bclk". */
113 #define HAS_BCLK			BIT(1)
114 #define HAS_VLD_PA_RNG			BIT(2)
115 #define RESET_AXI			BIT(3)
116 #define OUT_ORDER_WR_EN			BIT(4)
117 #define HAS_SUB_COMM			BIT(5)
118 #define WR_THROT_EN			BIT(6)
119 
120 #define MTK_IOMMU_HAS_FLAG(pdata, _x) \
121 		((((pdata)->flags) & (_x)) == (_x))
122 
123 struct mtk_iommu_domain {
124 	struct io_pgtable_cfg		cfg;
125 	struct io_pgtable_ops		*iop;
126 
127 	struct iommu_domain		domain;
128 };
129 
130 static const struct iommu_ops mtk_iommu_ops;
131 
132 /*
133  * In M4U 4GB mode, the physical address is remapped as below:
134  *
135  * CPU Physical address:
136  * ====================
137  *
138  * 0      1G       2G     3G       4G     5G
139  * |---A---|---B---|---C---|---D---|---E---|
140  * +--I/O--+------------Memory-------------+
141  *
142  * IOMMU output physical address:
143  * =============================
144  *
145  *                                 4G      5G     6G      7G      8G
146  *                                 |---E---|---B---|---C---|---D---|
147  *                                 +------------Memory-------------+
148  *
149  * Region 'A' (I/O) can NOT be mapped by the M4U. For Regions 'B'/'C'/'D',
150  * bit 32 of the CPU physical address must always be set, while for Region
151  * 'E' the CPU physical address is kept as is.
152  * Additionally, the IOMMU consumers always use the CPU physical address.
153  */
154 #define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
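/*
 * Worked example of the remap above (illustrative addresses only): a buffer at
 * CPU PA 0x4000_0000 (Region 'B') is put into the pagetable with bit 32 set by
 * mtk_iommu_map(), so the M4U outputs 0x1_4000_0000, while a buffer already
 * above 4GB (Region 'E') is programmed as is. mtk_iommu_iova_to_phys() strips
 * bit 32 again for any PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE, so the
 * consumers keep seeing the CPU physical address.
 */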
155 
156 static LIST_HEAD(m4ulist);	/* List of all the M4U HWs */
157 
158 #define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)
159 
160 /*
161  * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
162  * domain for performance reasons.
163  *
164  * This always returns the mtk_iommu_data of the first probed M4U, in which
165  * the iommu domain information is recorded.
166  */
167 static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
168 {
169 	struct mtk_iommu_data *data;
170 
171 	for_each_m4u(data)
172 		return data;
173 
174 	return NULL;
175 }
176 
177 static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
178 {
179 	return container_of(dom, struct mtk_iommu_domain, domain);
180 }
181 
182 static void mtk_iommu_tlb_flush_all(void *cookie)
183 {
184 	struct mtk_iommu_data *data = cookie;
185 
186 	for_each_m4u(data) {
187 		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
188 			       data->base + data->plat_data->inv_sel_reg);
189 		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
190 		wmb(); /* Make sure the TLB flush-all has completed */
191 	}
192 }
193 
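/*
 * Invalidate a range of IOVAs on every M4U: program the start/end registers,
 * trigger a range invalidation and poll REG_MMU_CPE_DONE as the "tlb sync".
 * If the sync times out, fall back to a full TLB flush.
 */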
194 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
195 					   size_t granule, void *cookie)
196 {
197 	struct mtk_iommu_data *data = cookie;
198 	unsigned long flags;
199 	int ret;
200 	u32 tmp;
201 
202 	for_each_m4u(data) {
203 		spin_lock_irqsave(&data->tlb_lock, flags);
204 		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
205 			       data->base + data->plat_data->inv_sel_reg);
206 
207 		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
208 		writel_relaxed(iova + size - 1,
209 			       data->base + REG_MMU_INVLD_END_A);
210 		writel_relaxed(F_MMU_INV_RANGE,
211 			       data->base + REG_MMU_INVALIDATE);
212 
213 		/* tlb sync */
214 		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
215 						tmp, tmp != 0, 10, 1000);
216 		if (ret) {
217 			dev_warn(data->dev,
218 				 "Partial TLB flush timed out, falling back to full flush\n");
219 			mtk_iommu_tlb_flush_all(cookie);
220 		}
221 		/* Clear the CPE status */
222 		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
223 		spin_unlock_irqrestore(&data->tlb_lock, flags);
224 	}
225 }
226 
227 static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
228 					    unsigned long iova, size_t granule,
229 					    void *cookie)
230 {
231 	struct mtk_iommu_data *data = cookie;
232 	struct iommu_domain *domain = &data->m4u_dom->domain;
233 
234 	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
235 }
236 
237 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
238 	.tlb_flush_all = mtk_iommu_tlb_flush_all,
239 	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
240 	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
241 	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
242 };
243 
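/*
 * Translation fault interrupt handler: read the fault status and the faulting
 * IOVA/PA, decode the larb/port from the INT_ID register, report the fault,
 * then clear the interrupt and flush the whole TLB.
 */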
244 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
245 {
246 	struct mtk_iommu_data *data = dev_id;
247 	struct mtk_iommu_domain *dom = data->m4u_dom;
248 	u32 int_state, regval, fault_iova, fault_pa;
249 	unsigned int fault_larb, fault_port, sub_comm = 0;
250 	bool layer, write;
251 
252 	/* Read error info from registers */
253 	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
254 	if (int_state & F_REG_MMU0_FAULT_MASK) {
255 		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
256 		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
257 		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
258 	} else {
259 		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
260 		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
261 		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
262 	}
263 	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
264 	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
265 	fault_port = F_MMU_INT_ID_PORT_ID(regval);
266 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
267 		fault_larb = F_MMU_INT_ID_COMM_ID(regval);
268 		sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
269 	} else {
270 		fault_larb = F_MMU_INT_ID_LARB_ID(regval);
271 	}
272 	fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
273 
274 	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
275 			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
276 		dev_err_ratelimited(
277 			data->dev,
278 			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
279 			int_state, fault_iova, fault_pa, fault_larb, fault_port,
280 			layer, write ? "write" : "read");
281 	}
282 
283 	/* Interrupt clear */
284 	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
285 	regval |= F_INT_CLR_BIT;
286 	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
287 
288 	mtk_iommu_tlb_flush_all(data);
289 
290 	return IRQ_HANDLED;
291 }
292 
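/*
 * Enable/disable IOMMU translation for each port of this master device by
 * updating the per-larb mmu bitmask; the mask is consumed by the MediaTek SMI
 * larb driver (outside this file) when it programs the larb hardware.
 */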
293 static void mtk_iommu_config(struct mtk_iommu_data *data,
294 			     struct device *dev, bool enable)
295 {
296 	struct mtk_smi_larb_iommu    *larb_mmu;
297 	unsigned int                 larbid, portid;
298 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
299 	int i;
300 
301 	for (i = 0; i < fwspec->num_ids; ++i) {
302 		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
303 		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
304 		larb_mmu = &data->larb_imu[larbid];
305 
306 		dev_dbg(dev, "%s iommu port: %d\n",
307 			enable ? "enable" : "disable", portid);
308 
309 		if (enable)
310 			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
311 		else
312 			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
313 	}
314 }
315 
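/*
 * Set up the ARM short-descriptor (v7s) io-pgtable for this domain: 32-bit
 * input addresses, 34-bit output addresses and the MediaTek extension quirk
 * (IO_PGTABLE_QUIRK_ARM_MTK_EXT).
 */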
316 static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
317 {
318 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
319 
320 	dom->cfg = (struct io_pgtable_cfg) {
321 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
322 			IO_PGTABLE_QUIRK_NO_PERMS |
323 			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
324 			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
325 		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
326 		.ias = 32,
327 		.oas = 34,
328 		.tlb = &mtk_iommu_flush_ops,
329 		.iommu_dev = data->dev,
330 	};
331 
332 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
333 	if (!dom->iop) {
334 		dev_err(data->dev, "Failed to alloc io pgtable\n");
335 		return -EINVAL;
336 	}
337 
338 	/* Update our supported page sizes bitmap */
339 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
340 	return 0;
341 }
342 
343 static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
344 {
345 	struct mtk_iommu_domain *dom;
346 
347 	if (type != IOMMU_DOMAIN_DMA)
348 		return NULL;
349 
350 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
351 	if (!dom)
352 		return NULL;
353 
354 	if (iommu_get_dma_cookie(&dom->domain))
355 		goto  free_dom;
356 
357 	if (mtk_iommu_domain_finalise(dom))
358 		goto  put_dma_cookie;
359 
360 	dom->domain.geometry.aperture_start = 0;
361 	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
362 	dom->domain.geometry.force_aperture = true;
363 
364 	return &dom->domain;
365 
366 put_dma_cookie:
367 	iommu_put_dma_cookie(&dom->domain);
368 free_dom:
369 	kfree(dom);
370 	return NULL;
371 }
372 
373 static void mtk_iommu_domain_free(struct iommu_domain *domain)
374 {
375 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
376 
377 	free_io_pgtable_ops(dom->iop);
378 	iommu_put_dma_cookie(domain);
379 	kfree(to_mtk_domain(domain));
380 }
381 
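/*
 * All masters share the single m4u domain: the first attach programs the
 * pagetable base into the M4U HW, later attaches only enable the device's
 * ports in the larbs.
 */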
382 static int mtk_iommu_attach_device(struct iommu_domain *domain,
383 				   struct device *dev)
384 {
385 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
386 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
387 
388 	if (!data)
389 		return -ENODEV;
390 
391 	/* Update the pgtable base address register of the M4U HW */
392 	if (!data->m4u_dom) {
393 		data->m4u_dom = dom;
394 		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
395 		       data->base + REG_MMU_PT_BASE_ADDR);
396 	}
397 
398 	mtk_iommu_config(data, dev, true);
399 	return 0;
400 }
401 
402 static void mtk_iommu_detach_device(struct iommu_domain *domain,
403 				    struct device *dev)
404 {
405 	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
406 
407 	if (!data)
408 		return;
409 
410 	mtk_iommu_config(data, dev, false);
411 }
412 
413 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
414 			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
415 {
416 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
417 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
418 
419 	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
420 	if (data->enable_4GB)
421 		paddr |= BIT_ULL(32);
422 
423 	/* Synchronize with the tlb_lock */
424 	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
425 }
426 
427 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
428 			      unsigned long iova, size_t size,
429 			      struct iommu_iotlb_gather *gather)
430 {
431 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
432 
433 	return dom->iop->unmap(dom->iop, iova, size, gather);
434 }
435 
436 static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
437 {
438 	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
439 }
440 
441 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
442 				 struct iommu_iotlb_gather *gather)
443 {
444 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
445 	size_t length = gather->end - gather->start;
446 
447 	if (gather->start == ULONG_MAX)
448 		return;
449 
450 	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
451 				       data);
452 }
453 
454 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
455 					  dma_addr_t iova)
456 {
457 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
458 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
459 	phys_addr_t pa;
460 
461 	pa = dom->iop->iova_to_phys(dom->iop, iova);
462 	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
463 		pa &= ~BIT_ULL(32);
464 
465 	return pa;
466 }
467 
468 static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
469 {
470 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
471 	struct mtk_iommu_data *data;
472 
473 	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
474 		return ERR_PTR(-ENODEV); /* Not an iommu client device */
475 
476 	data = dev_iommu_priv_get(dev);
477 
478 	return &data->iommu;
479 }
480 
481 static void mtk_iommu_release_device(struct device *dev)
482 {
483 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
484 
485 	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
486 		return;
487 
488 	iommu_fwspec_free(dev);
489 }
490 
491 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
492 {
493 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
494 
495 	if (!data)
496 		return ERR_PTR(-ENODEV);
497 
498 	/* All the client devices are in the same m4u iommu-group */
499 	if (!data->m4u_group) {
500 		data->m4u_group = iommu_group_alloc();
501 		if (IS_ERR(data->m4u_group))
502 			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
503 	} else {
504 		iommu_group_ref_get(data->m4u_group);
505 	}
506 	return data->m4u_group;
507 }
508 
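/*
 * Translate the single-cell "iommus" specifier (an MTK_M4U_ID value) into a
 * fwspec id and bind the client device to the m4u instance it references.
 */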
509 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
510 {
511 	struct platform_device *m4updev;
512 
513 	if (args->args_count != 1) {
514 		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
515 			args->args_count);
516 		return -EINVAL;
517 	}
518 
519 	if (!dev_iommu_priv_get(dev)) {
520 		/* Get the m4u device */
521 		m4updev = of_find_device_by_node(args->np);
522 		if (WARN_ON(!m4updev))
523 			return -EINVAL;
524 
525 		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
526 	}
527 
528 	return iommu_fwspec_add_ids(dev, args->args, 1);
529 }
530 
531 static const struct iommu_ops mtk_iommu_ops = {
532 	.domain_alloc	= mtk_iommu_domain_alloc,
533 	.domain_free	= mtk_iommu_domain_free,
534 	.attach_dev	= mtk_iommu_attach_device,
535 	.detach_dev	= mtk_iommu_detach_device,
536 	.map		= mtk_iommu_map,
537 	.unmap		= mtk_iommu_unmap,
538 	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
539 	.iotlb_sync	= mtk_iommu_iotlb_sync,
540 	.iova_to_phys	= mtk_iommu_iova_to_phys,
541 	.probe_device	= mtk_iommu_probe_device,
542 	.release_device	= mtk_iommu_release_device,
543 	.device_group	= mtk_iommu_device_group,
544 	.of_xlate	= mtk_iommu_of_xlate,
545 	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
546 };
547 
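/*
 * One-time hardware init: enable the bus clock, program the control,
 * interrupt-enable, protect-address and AXI/throttling registers for this
 * platform, then request the fault interrupt.
 */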
548 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
549 {
550 	u32 regval;
551 	int ret;
552 
553 	ret = clk_prepare_enable(data->bclk);
554 	if (ret) {
555 		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
556 		return ret;
557 	}
558 
559 	if (data->plat_data->m4u_plat == M4U_MT8173) {
560 		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
561 			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
562 	} else {
563 		regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
564 		regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
565 	}
566 	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
567 
568 	regval = F_L2_MULIT_HIT_EN |
569 		F_TABLE_WALK_FAULT_INT_EN |
570 		F_PREETCH_FIFO_OVERFLOW_INT_EN |
571 		F_MISS_FIFO_OVERFLOW_INT_EN |
572 		F_PREFETCH_FIFO_ERR_INT_EN |
573 		F_MISS_FIFO_ERR_INT_EN;
574 	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
575 
576 	regval = F_INT_TRANSLATION_FAULT |
577 		F_INT_MAIN_MULTI_HIT_FAULT |
578 		F_INT_INVALID_PA_FAULT |
579 		F_INT_ENTRY_REPLACEMENT_FAULT |
580 		F_INT_TLB_MISS_FAULT |
581 		F_INT_MISS_TRANSACTION_FIFO_FAULT |
582 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
583 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
584 
585 	if (data->plat_data->m4u_plat == M4U_MT8173)
586 		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
587 	else
588 		regval = lower_32_bits(data->protect_base) |
589 			 upper_32_bits(data->protect_base);
590 	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
591 
592 	if (data->enable_4GB &&
593 	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
594 		/*
595 		 * If 4GB mode is enabled, the valid PA range is from
596 		 * 0x1_0000_0000 to 0x1_ffff_ffff. Record bits [32:30] here.
597 		 */
598 		regval = F_MMU_VLD_PA_RNG(7, 4);
599 		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
600 	}
601 	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
602 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
603 		/* Enable the write command throttling mode */
604 		regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
605 		regval &= ~F_MMU_WR_THROT_DIS_MASK;
606 		writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
607 	}
608 
609 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
610 		/* The register is called STANDARD_AXI_MODE in this case */
611 		regval = 0;
612 	} else {
613 		regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
614 		regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
615 		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
616 			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
617 	}
618 	writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
619 
620 	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
621 			     dev_name(data->dev), (void *)data)) {
622 		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
623 		clk_disable_unprepare(data->bclk);
624 		dev_err(data->dev, "Failed to request irq %d\n", data->irq);
625 		return -ENODEV;
626 	}
627 
628 	return 0;
629 }
630 
631 static const struct component_master_ops mtk_iommu_com_ops = {
632 	.bind		= mtk_iommu_bind,
633 	.unbind		= mtk_iommu_unbind,
634 };
635 
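/*
 * Probe flow: allocate the protect memory, map the M4U registers, collect the
 * larbs from "mediatek,larbs" into a component match, initialise the hardware,
 * register the IOMMU and finally bind the SMI larbs via the component
 * framework.
 */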
636 static int mtk_iommu_probe(struct platform_device *pdev)
637 {
638 	struct mtk_iommu_data   *data;
639 	struct device           *dev = &pdev->dev;
640 	struct resource         *res;
641 	resource_size_t		ioaddr;
642 	struct component_match  *match = NULL;
643 	void                    *protect;
644 	int                     i, larb_nr, ret;
645 
646 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
647 	if (!data)
648 		return -ENOMEM;
649 	data->dev = dev;
650 	data->plat_data = of_device_get_match_data(dev);
651 
652 	/* Protect memory: the HW accesses this region when a translation fault occurs. */
653 	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
654 	if (!protect)
655 		return -ENOMEM;
656 	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
657 
658 	/* Check whether the current DRAM extends beyond 4GB */
659 	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
660 	if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
661 		data->enable_4GB = false;
662 
663 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
664 	data->base = devm_ioremap_resource(dev, res);
665 	if (IS_ERR(data->base))
666 		return PTR_ERR(data->base);
667 	ioaddr = res->start;
668 
669 	data->irq = platform_get_irq(pdev, 0);
670 	if (data->irq < 0)
671 		return data->irq;
672 
673 	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
674 		data->bclk = devm_clk_get(dev, "bclk");
675 		if (IS_ERR(data->bclk))
676 			return PTR_ERR(data->bclk);
677 	}
678 
679 	larb_nr = of_count_phandle_with_args(dev->of_node,
680 					     "mediatek,larbs", NULL);
681 	if (larb_nr < 0)
682 		return larb_nr;
683 
684 	for (i = 0; i < larb_nr; i++) {
685 		struct device_node *larbnode;
686 		struct platform_device *plarbdev;
687 		u32 id;
688 
689 		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
690 		if (!larbnode)
691 			return -EINVAL;
692 
693 		if (!of_device_is_available(larbnode)) {
694 			of_node_put(larbnode);
695 			continue;
696 		}
697 
698 		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
699 		if (ret)/* The id is consecutive if there is no this property */
700 		if (ret)/* The ids are consecutive if this property is absent */
701 
702 		plarbdev = of_find_device_by_node(larbnode);
703 		if (!plarbdev) {
704 			of_node_put(larbnode);
705 			return -EPROBE_DEFER;
706 		}
707 		data->larb_imu[id].dev = &plarbdev->dev;
708 
709 		component_match_add_release(dev, &match, release_of,
710 					    compare_of, larbnode);
711 	}
712 
713 	platform_set_drvdata(pdev, data);
714 
715 	ret = mtk_iommu_hw_init(data);
716 	if (ret)
717 		return ret;
718 
719 	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
720 				     "mtk-iommu.%pa", &ioaddr);
721 	if (ret)
722 		return ret;
723 
724 	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
725 	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
726 
727 	ret = iommu_device_register(&data->iommu);
728 	if (ret)
729 		return ret;
730 
731 	spin_lock_init(&data->tlb_lock);
732 	list_add_tail(&data->list, &m4ulist);
733 
734 	if (!iommu_present(&platform_bus_type))
735 		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
736 
737 	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
738 }
739 
740 static int mtk_iommu_remove(struct platform_device *pdev)
741 {
742 	struct mtk_iommu_data *data = platform_get_drvdata(pdev);
743 
744 	iommu_device_sysfs_remove(&data->iommu);
745 	iommu_device_unregister(&data->iommu);
746 
747 	if (iommu_present(&platform_bus_type))
748 		bus_set_iommu(&platform_bus_type, NULL);
749 
750 	clk_disable_unprepare(data->bclk);
751 	devm_free_irq(&pdev->dev, data->irq, data);
752 	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
753 	return 0;
754 }
755 
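/*
 * Save the M4U registers before the bus clock is gated; mtk_iommu_resume()
 * restores them together with the pagetable base.
 */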
756 static int __maybe_unused mtk_iommu_suspend(struct device *dev)
757 {
758 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
759 	struct mtk_iommu_suspend_reg *reg = &data->reg;
760 	void __iomem *base = data->base;
761 
762 	reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
763 	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
764 	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
765 	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
766 	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
767 	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
768 	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
769 	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
770 	clk_disable_unprepare(data->bclk);
771 	return 0;
772 }
773 
774 static int __maybe_unused mtk_iommu_resume(struct device *dev)
775 {
776 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
777 	struct mtk_iommu_suspend_reg *reg = &data->reg;
778 	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
779 	void __iomem *base = data->base;
780 	int ret;
781 
782 	ret = clk_prepare_enable(data->bclk);
783 	if (ret) {
784 		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
785 		return ret;
786 	}
787 	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
788 	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
789 	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
790 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
791 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
792 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
793 	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
794 	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
795 	if (m4u_dom)
796 		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
797 		       base + REG_MMU_PT_BASE_ADDR);
798 	return 0;
799 }
800 
801 static const struct dev_pm_ops mtk_iommu_pm_ops = {
802 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
803 };
804 
805 static const struct mtk_iommu_plat_data mt2712_data = {
806 	.m4u_plat     = M4U_MT2712,
807 	.flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
808 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
809 	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
810 };
811 
812 static const struct mtk_iommu_plat_data mt6779_data = {
813 	.m4u_plat      = M4U_MT6779,
814 	.flags         = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
815 	.inv_sel_reg   = REG_MMU_INV_SEL_GEN2,
816 	.larbid_remap  = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
817 };
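/*
 * MT6779 reports a common/sub-common index in the fault ID instead of a raw
 * larb number, so larbid_remap[comm][sub_comm] translates it back to the real
 * larb in mtk_iommu_isr(); platforms without HAS_SUB_COMM only use the first
 * column.
 */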
818 
819 static const struct mtk_iommu_plat_data mt8173_data = {
820 	.m4u_plat     = M4U_MT8173,
821 	.flags	      = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
822 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
823 	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
824 };
825 
826 static const struct mtk_iommu_plat_data mt8183_data = {
827 	.m4u_plat     = M4U_MT8183,
828 	.flags        = RESET_AXI,
829 	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
830 	.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
831 };
832 
833 static const struct of_device_id mtk_iommu_of_ids[] = {
834 	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
835 	{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
836 	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
837 	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
838 	{}
839 };
840 
841 static struct platform_driver mtk_iommu_driver = {
842 	.probe	= mtk_iommu_probe,
843 	.remove	= mtk_iommu_remove,
844 	.driver	= {
845 		.name = "mtk-iommu",
846 		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
847 		.pm = &mtk_iommu_pm_ops,
848 	}
849 };
850 
851 static int __init mtk_iommu_init(void)
852 {
853 	int ret;
854 
855 	ret = platform_driver_register(&mtk_iommu_driver);
856 	if (ret != 0)
857 		pr_err("Failed to register MTK IOMMU driver\n");
858 
859 	return ret;
860 }
861 
862 subsys_initcall(mtk_iommu_init)
863