// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on driver/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>

/* Physical address of the (single, flat) page table. */
#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
/* Decode larb/port indices out of the REG_MMU_INT_ID fault id. */
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * MTK m4u support 4GB iova address space, and only support 4K page
 * mapping. So the pagetable size should be exactly as 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

/* Registers saved across suspend/resume (saved/restored elsewhere). */
struct mtk_iommu_v1_suspend_reg {
	u32			standard_axi_mode;
	u32			dcm_dis;
	u32			ctrl_reg;
	u32			int_control0;
};

/* Per-m4u-instance driver state. */
struct mtk_iommu_v1_data {
	void __iomem		*base;		/* MMIO register base */
	int			irq;
	struct device		*dev;
	struct clk		*bclk;
	phys_addr_t		protect_base; /* protect memory base */
	struct mtk_iommu_v1_domain	*m4u_dom; /* the single HW domain */

	struct iommu_device		iommu;
	struct dma_iommu_mapping	*mapping;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];

	struct mtk_iommu_v1_suspend_reg	reg;
};

/* One flat one-level page table covering the whole 4GB iova space. */
struct mtk_iommu_v1_domain {
	spinlock_t			pgtlock; /* lock for page table */
	struct iommu_domain		domain;
	u32				*pgt_va; /* CPU view of the page table */
	dma_addr_t			pgt_pa;  /* HW view (programmed into PT_BASE) */
	struct mtk_iommu_v1_data	*data;
};

/* Component master bind: hand larb_imu to all smi-larb components. */
static int mtk_iommu_v1_bind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_v1_unbind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_v1_domain, domain);
}

/* First global port index of each larb; used to split a flat id. */
static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

/* Map a flat m4u port id to its larb index (largest offset <= id). */
static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if ((id) >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

/* Map a flat m4u port id to its port index within its larb. */
static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

/*
 * Range-invalidate [iova, iova + size - 1]; falls back to a full flush
 * if the HW does not signal completion (CPE_DONE) in time.
 */
static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					 unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
					tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_v1_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

/* Translation-fault interrupt handler: report, clear, and flush TLB. */
static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
	struct mtk_iommu_v1_data *data = dev_id;
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * MTK v1 iommu HW could not determine whether the fault is read or
	 * write fault, report as read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_v1_tlb_flush_all(data);

	return IRQ_HANDLED;
}

/*
 * Enable/disable iommu translation for every port of @dev by flipping
 * the per-port bit in the owning larb's mmu config word.
 */
static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
				struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

/* Allocate the 4M flat page table and point the HW at it. */
static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
{
	struct mtk_iommu_v1_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_v1_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	/* The HW supports a single domain; finalise it on first attach. */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_v1_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_v1_config(data, dev, true);
	return 0;
}

static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_v1_config(data, dev, false);
}

/*
 * Fill one 4K descriptor per page. On hitting an already-valid entry,
 * roll back the entries written so far and return -EEXIST.
 */
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	/* Strip the descriptor flag bits, keeping the page-aligned PA. */
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_v1_ops;

/*
 * MTK generation one iommu HW only support one iommu domain, and all the client
 * sharing the same iova address space.
 */
static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* MTK iommu support 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}

static int mtk_iommu_v1_def_domain_type(struct device *dev)
{
	return IOMMU_DOMAIN_UNMANAGED;
}

static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct of_phandle_args iommu_spec;
	struct mtk_iommu_v1_data *data;
	int err, idx = 0, larbid, larbidx;
	struct device_link *link;
	struct device *larbdev;

	/*
	 * In the deferred case, free the existed fwspec.
	 * Always initialize the fwspec internally.
	 */
	if (fwspec) {
		iommu_fwspec_free(dev);
		fwspec = dev_iommu_fwspec_get(dev);
	}

	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {

		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		if (err)
			return ERR_PTR(err);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);
		idx++;
	}

	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		return ERR_PTR(-ENODEV); /* Not a iommu client device */

	data = dev_iommu_priv_get(dev);

	/* Link the consumer device with the smi-larb device(supplier) */
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	for (idx = 1; idx < fwspec->num_ids; idx++) {
		larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}

	larbdev = data->larb_imu[larbid].dev;
	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));

	return &data->iommu;
}

/* Attach the client to the shared ARM DMA mapping once probing is done. */
static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_v1_data *data;
	int err;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_v1_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct device *larbdev;
	unsigned int larbid;

	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		return;

	/* Drop the consumer<->larb link created in probe_device. */
	data = dev_iommu_priv_get(dev);
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	larbdev = data->larb_imu[larbid].dev;
	device_link_remove(dev, larbdev);

	iommu_fwspec_free(dev);
}

/* One-time HW bring-up: clock, fault selection, irq mask, protect page. */
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* protect memory,hw will write here while translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		/* Undo bring-up: detach page table and disable the clock. */
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_v1_ops = {
	.domain_alloc	= mtk_iommu_v1_domain_alloc,
	.probe_device	= mtk_iommu_v1_probe_device,
	.probe_finalize = mtk_iommu_v1_probe_finalize,
	.release_device	= mtk_iommu_v1_release_device,
	.def_domain_type = mtk_iommu_v1_def_domain_type,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
	.owner          = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_v1_attach_device,
		.detach_dev	= mtk_iommu_v1_detach_device,
		.map		= mtk_iommu_v1_map,
		.unmap		= mtk_iommu_v1_unmap,
		.iova_to_phys	= mtk_iommu_v1_iova_to_phys,
		.free		= mtk_iommu_v1_domain_free,
	}
};

static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind		= mtk_iommu_v1_bind,
	.unbind		= mtk_iommu_v1_unbind,
};

static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct mtk_iommu_v1_data	*data;
	struct resource			*res;
	struct component_match		*match = NULL;
	void				*protect;
	int				larb_nr, ret, i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access here while translation fault.*/
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	/* Collect all smi-larb suppliers; defer until they have probed. */
	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -ENODEV;
		}
		if (!plarbdev->dev.driver) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, component_release_of,
					    component_compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_v1_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	if (ret)
		goto out_sysfs_remove;

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type,  &mtk_iommu_v1_ops);
		if (ret)
			goto out_dev_unreg;
	}

	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
Wu if (ret) 700ac304c07SYong Wu goto out_bus_set_null; 7016f66ea09SJoerg Roedel return ret; 7026f66ea09SJoerg Roedel 703ac304c07SYong Wu out_bus_set_null: 704ac304c07SYong Wu bus_set_iommu(&platform_bus_type, NULL); 705ac304c07SYong Wu out_dev_unreg: 706ac304c07SYong Wu iommu_device_unregister(&data->iommu); 707ac304c07SYong Wu out_sysfs_remove: 708ac304c07SYong Wu iommu_device_sysfs_remove(&data->iommu); 709ac304c07SYong Wu return ret; 710b17336c5SHonghui Zhang } 711b17336c5SHonghui Zhang 712*ad9b10e5SYong Wu static int mtk_iommu_v1_remove(struct platform_device *pdev) 713b17336c5SHonghui Zhang { 714*ad9b10e5SYong Wu struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev); 715b17336c5SHonghui Zhang 7166f66ea09SJoerg Roedel iommu_device_sysfs_remove(&data->iommu); 7176f66ea09SJoerg Roedel iommu_device_unregister(&data->iommu); 7186f66ea09SJoerg Roedel 719b17336c5SHonghui Zhang if (iommu_present(&platform_bus_type)) 720b17336c5SHonghui Zhang bus_set_iommu(&platform_bus_type, NULL); 721b17336c5SHonghui Zhang 722b17336c5SHonghui Zhang clk_disable_unprepare(data->bclk); 723b17336c5SHonghui Zhang devm_free_irq(&pdev->dev, data->irq, data); 724*ad9b10e5SYong Wu component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops); 725b17336c5SHonghui Zhang return 0; 726b17336c5SHonghui Zhang } 727b17336c5SHonghui Zhang 728*ad9b10e5SYong Wu static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev) 729b17336c5SHonghui Zhang { 730*ad9b10e5SYong Wu struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); 731*ad9b10e5SYong Wu struct mtk_iommu_v1_suspend_reg *reg = &data->reg; 732b17336c5SHonghui Zhang void __iomem *base = data->base; 733b17336c5SHonghui Zhang 734b17336c5SHonghui Zhang reg->standard_axi_mode = readl_relaxed(base + 735b17336c5SHonghui Zhang REG_MMU_STANDARD_AXI_MODE); 736b17336c5SHonghui Zhang reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM); 737b17336c5SHonghui Zhang reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); 738b17336c5SHonghui Zhang 
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL); 739b17336c5SHonghui Zhang return 0; 740b17336c5SHonghui Zhang } 741b17336c5SHonghui Zhang 742*ad9b10e5SYong Wu static int __maybe_unused mtk_iommu_v1_resume(struct device *dev) 743b17336c5SHonghui Zhang { 744*ad9b10e5SYong Wu struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); 745*ad9b10e5SYong Wu struct mtk_iommu_v1_suspend_reg *reg = &data->reg; 746b17336c5SHonghui Zhang void __iomem *base = data->base; 747b17336c5SHonghui Zhang 748b17336c5SHonghui Zhang writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR); 749b17336c5SHonghui Zhang writel_relaxed(reg->standard_axi_mode, 750b17336c5SHonghui Zhang base + REG_MMU_STANDARD_AXI_MODE); 751b17336c5SHonghui Zhang writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM); 752b17336c5SHonghui Zhang writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); 753b17336c5SHonghui Zhang writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL); 754b17336c5SHonghui Zhang writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR); 755b17336c5SHonghui Zhang return 0; 756b17336c5SHonghui Zhang } 757b17336c5SHonghui Zhang 758*ad9b10e5SYong Wu static const struct dev_pm_ops mtk_iommu_v1_pm_ops = { 759*ad9b10e5SYong Wu SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume) 760b17336c5SHonghui Zhang }; 761b17336c5SHonghui Zhang 762*ad9b10e5SYong Wu static struct platform_driver mtk_iommu_v1_driver = { 763*ad9b10e5SYong Wu .probe = mtk_iommu_v1_probe, 764*ad9b10e5SYong Wu .remove = mtk_iommu_v1_remove, 765b17336c5SHonghui Zhang .driver = { 766395df08dSMatthias Brugger .name = "mtk-iommu-v1", 767*ad9b10e5SYong Wu .of_match_table = mtk_iommu_v1_of_ids, 768*ad9b10e5SYong Wu .pm = &mtk_iommu_v1_pm_ops, 769b17336c5SHonghui Zhang } 770b17336c5SHonghui Zhang }; 771*ad9b10e5SYong Wu module_platform_driver(mtk_iommu_v1_driver); 772b17336c5SHonghui Zhang 7738de000cfSYong Wu MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations"); 7748de000cfSYong 
Wu MODULE_LICENSE("GPL v2"); 775