1b17336c5SHonghui Zhang /* 2b17336c5SHonghui Zhang * Copyright (c) 2015-2016 MediaTek Inc. 3b17336c5SHonghui Zhang * Author: Honghui Zhang <honghui.zhang@mediatek.com> 4b17336c5SHonghui Zhang * 5b17336c5SHonghui Zhang * Based on driver/iommu/mtk_iommu.c 6b17336c5SHonghui Zhang * 7b17336c5SHonghui Zhang * This program is free software; you can redistribute it and/or modify 8b17336c5SHonghui Zhang * it under the terms of the GNU General Public License version 2 as 9b17336c5SHonghui Zhang * published by the Free Software Foundation. 10b17336c5SHonghui Zhang * 11b17336c5SHonghui Zhang * This program is distributed in the hope that it will be useful, 12b17336c5SHonghui Zhang * but WITHOUT ANY WARRANTY; without even the implied warranty of 13b17336c5SHonghui Zhang * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14b17336c5SHonghui Zhang * GNU General Public License for more details. 15b17336c5SHonghui Zhang */ 16b17336c5SHonghui Zhang #include <linux/bootmem.h> 17b17336c5SHonghui Zhang #include <linux/bug.h> 18b17336c5SHonghui Zhang #include <linux/clk.h> 19b17336c5SHonghui Zhang #include <linux/component.h> 20b17336c5SHonghui Zhang #include <linux/device.h> 21b17336c5SHonghui Zhang #include <linux/dma-iommu.h> 22b17336c5SHonghui Zhang #include <linux/err.h> 23b17336c5SHonghui Zhang #include <linux/interrupt.h> 24b17336c5SHonghui Zhang #include <linux/io.h> 25b17336c5SHonghui Zhang #include <linux/iommu.h> 26b17336c5SHonghui Zhang #include <linux/iopoll.h> 27b17336c5SHonghui Zhang #include <linux/kmemleak.h> 28b17336c5SHonghui Zhang #include <linux/list.h> 29b17336c5SHonghui Zhang #include <linux/of_address.h> 30b17336c5SHonghui Zhang #include <linux/of_iommu.h> 31b17336c5SHonghui Zhang #include <linux/of_irq.h> 32b17336c5SHonghui Zhang #include <linux/of_platform.h> 33b17336c5SHonghui Zhang #include <linux/platform_device.h> 34b17336c5SHonghui Zhang #include <linux/slab.h> 35b17336c5SHonghui Zhang #include <linux/spinlock.h> 36b17336c5SHonghui Zhang 
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <linux/module.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

/* M4U (generation one) register offsets and field encodings. */
#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
/* Decode larb/port indices out of the REG_MMU_INT_ID fault id. */
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one iommu HW only support 4K size mapping */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * MTK m4u support 4GB iova address space, and only support 4K page
 * mapping. So the pagetable size should be exactly as 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

/*
 * One-level page table domain: pgt_va is a flat array of 1M u32 descriptors
 * (one per 4K page of the 4GB iova space), pgt_pa is its DMA address as
 * programmed into REG_MMU_PT_BASE_ADDR.
 */
struct mtk_iommu_domain {
	spinlock_t		pgtlock; /* lock for page table */
	struct iommu_domain	domain;
	u32			*pgt_va;
	dma_addr_t		pgt_pa;
	struct mtk_iommu_data	*data;
};

/* Upcast from the embedded generic iommu_domain to the driver domain. */
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

/* First global port id of each larb; used to split an id into larb+port. */
static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

/*
 * Map a global M4U port id to its larb index: the highest larb whose first
 * port offset is <= id. Falls back to larb 0.
 */
static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if ((id) >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

/* Port index of a global M4U port id within its larb. */
static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

/* Invalidate the whole TLB on both invalidation engines. */
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

/*
 * Invalidate the TLB for [iova, iova + size). Polls REG_MMU_CPE_DONE for
 * completion; on timeout falls back to a full flush.
 */
static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
				unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

/*
 * Translation-fault interrupt handler: reads the fault iova/pa and the
 * larb/port that raised it, reports the fault, clears the interrupt and
 * flushes the whole TLB.
 */
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * MTK v1 iommu HW could not determine whether the fault is read or
	 * write fault, report as read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

/*
 * Enable or disable IOMMU translation for every larb/port the client device
 * uses, by toggling the port bit in the SMI larb's mmu bitmap.
 */
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_iommu_client_priv *head, *cur, *next;
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;

	head = dev->archdata.iommu;
	list_for_each_entry_safe(cur, next, &head->client, client) {
		larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
		portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

/*
 * Allocate the 4M flat page table for the (single) hardware domain and
 * program its DMA address into the page-table base register.
 */
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_zalloc_coherent(data->dev,
				M2701_IOMMU_PGT_SIZE,
				&dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

/* Only unmanaged domains are supported by this generation of hardware. */
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

/* Free the page table and the domain itself. */
static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

/*
 * Attach a client device. The single hardware domain is finalised lazily on
 * the first attach; afterwards only the SMI port configuration is updated.
 */
static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;
	int ret;

	if (!priv)
		return -ENODEV;

	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

/* Detach a client device: just turn its SMI ports off. */
static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;

	if (!priv)
		return;

	data = dev_get_drvdata(priv->m4udev);
	mtk_iommu_config(data, dev, false);
}

/*
 * Map size bytes at iova to paddr by filling one descriptor per 4K page.
 * If an entry in the range is already valid, the entries written so far by
 * this call are cleared and -EEXIST is returned (map_size != size).
 */
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			/* Conflict: roll back entries [0, i) and bail out. */
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

/* Unmap: zero the descriptors for the range, then flush its TLB entries. */
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return size;
}

/* Look up the physical address for iova from the flat descriptor array. */
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	/* Strip the descriptor flag bits, keep the 4K-aligned address. */
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

/*
 * MTK generation one iommu HW only support one iommu domain, and all the client
 * sharing the same iova address space.
 *
 * Records the "iommus" specifier for a client device in its per-device client
 * list, creates the shared 4GB ARM dma mapping on first use, and attaches the
 * device to it.
 */
static int mtk_iommu_create_mapping(struct device *dev,
				    struct of_phandle_args *args)
{
	struct mtk_iommu_client_priv *head, *priv, *next;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	struct device *m4udev;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->archdata.iommu) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOMEM;

		dev->archdata.iommu = head;
		INIT_LIST_HEAD(&head->client);
		head->m4udev = &m4updev->dev;
	} else {
		head = dev->archdata.iommu;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_free_mem;
	}
	priv->mtk_m4u_id = args->args[0];
	list_add_tail(&priv->client, &head->client);

	m4udev = head->m4udev;
	mtk_mapping = m4udev->archdata.iommu;
	if (!mtk_mapping) {
		/* MTK iommu support 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping)) {
			ret = PTR_ERR(mtk_mapping);
			goto err_free_mem;
		}
		m4udev->archdata.iommu = mtk_mapping;
	}

	ret = arm_iommu_attach_device(dev, mtk_mapping);
	if (ret)
		goto err_release_mapping;

	return 0;

err_release_mapping:
	arm_iommu_release_mapping(mtk_mapping);
	m4udev->archdata.iommu = NULL;
err_free_mem:
	/*
	 * NOTE(review): this error path frees the whole client list and head
	 * even when they pre-existed this call (head taken from
	 * dev->archdata.iommu above), and the freed entries are not
	 * list_del()ed first — confirm this cannot tear down state owned by
	 * an earlier successful call for the same device.
	 */
	list_for_each_entry_safe(priv, next, &head->client, client)
		kfree(priv);
	kfree(head);
	dev->archdata.iommu = NULL;
	return ret;
}

/*
 * add_device callback: walk the device's "iommus" phandles, build its client
 * list / mapping, then join the shared iommu group.
 */
static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct of_phandle_args iommu_spec;
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "iommus",
			"#iommu-cells", 0) {
		int count = of_phandle_iterator_args(&it, iommu_spec.args,
					MAX_PHANDLE_ARGS);
		iommu_spec.np = of_node_get(it.node);
		iommu_spec.args_count = count;

		/* NOTE(review): the return value is ignored here — a failed
		 * mapping for one specifier is only detected indirectly via
		 * the archdata check below.
		 */
		mtk_iommu_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
	}

	if (!dev->archdata.iommu) /* Not a iommu client device */
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

/* remove_device callback: free the client list and leave the group. */
static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_client_priv *head, *cur, *next;

	head = dev->archdata.iommu;
	if (!head)
		return;

	list_for_each_entry_safe(cur, next, &head->client, client) {
		list_del(&cur->client);
		kfree(cur);
	}
	kfree(head);
	dev->archdata.iommu = NULL;

	iommu_group_remove_device(dev);
}

/*
 * device_group callback: every client shares the single m4u group, allocated
 * on first use.
 */
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct mtk_iommu_client_priv *priv;

	priv = dev->archdata.iommu;
	if (!priv)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	}
	/*
	 * NOTE(review): the cached group is returned without taking an extra
	 * reference for second and later callers — confirm against the
	 * refcounting expected by iommu_group_get_for_dev().
	 */
	return data->m4u_group;
}

/*
 * Bring the hardware up: enable the bus clock, program translation control,
 * fault interrupts, the protect-buffer address and dynamic clock management,
 * then install the ISR. On IRQ request failure the page-table base is
 * cleared and the clock released.
 */
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* protect memory,hw will write here while translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	/* Any power-of-two size >= 4K is built from 4K descriptors. */
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

/*
 * Probe: map the M4U registers, reserve the translation-fault protect
 * buffer, collect the SMI larb devices listed in "mediatek,larbs" into a
 * component match, initialise the hardware, and register this driver as the
 * platform-bus IOMMU.
 */
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	struct of_phandle_args larb_spec;
	struct of_phandle_iterator it;
	void *protect;
	int larb_nr, ret, err;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access here while translation fault.*/
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	/* Over-allocated by 2x above so the aligned base stays in bounds. */
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = 0;
	/* NOTE(review): 'err' set by the iterator is never checked. */
	of_for_each_phandle(&it, err, dev->of_node,
			"mediatek,larbs", NULL, 0) {
		struct platform_device *plarbdev;
		int count = of_phandle_iterator_args(&it, larb_spec.args,
					MAX_PHANDLE_ARGS);

		/* "mediatek,larbs" entries must carry no argument cells. */
		if (count)
			continue;

		larb_spec.np = of_node_get(it.node);
		if (!of_device_is_available(larb_spec.np))
			continue;

		plarbdev = of_find_device_by_node(larb_spec.np);
		of_node_put(larb_spec.np);
		/*
		 * NOTE(review): larb_spec.np is still used below (device
		 * creation, component_match_add) after of_node_put() —
		 * confirm the refcount drop here is not premature.
		 */
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larb_spec.np, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev)
				return -EPROBE_DEFER;
		}

		data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
		component_match_add(dev, &match, compare_of, larb_spec.np);
		larb_nr++;
	}

	data->smi_imu.larb_nr = larb_nr;

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

/* Remove: unregister from the bus, quiesce clock/IRQ, drop components. */
static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
Zhang struct mtk_iommu_suspend_reg *reg = &data->reg; 672b17336c5SHonghui Zhang void __iomem *base = data->base; 673b17336c5SHonghui Zhang 674b17336c5SHonghui Zhang reg->standard_axi_mode = readl_relaxed(base + 675b17336c5SHonghui Zhang REG_MMU_STANDARD_AXI_MODE); 676b17336c5SHonghui Zhang reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM); 677b17336c5SHonghui Zhang reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); 678b17336c5SHonghui Zhang reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL); 679b17336c5SHonghui Zhang return 0; 680b17336c5SHonghui Zhang } 681b17336c5SHonghui Zhang 682b17336c5SHonghui Zhang static int __maybe_unused mtk_iommu_resume(struct device *dev) 683b17336c5SHonghui Zhang { 684b17336c5SHonghui Zhang struct mtk_iommu_data *data = dev_get_drvdata(dev); 685b17336c5SHonghui Zhang struct mtk_iommu_suspend_reg *reg = &data->reg; 686b17336c5SHonghui Zhang void __iomem *base = data->base; 687b17336c5SHonghui Zhang 688b17336c5SHonghui Zhang writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR); 689b17336c5SHonghui Zhang writel_relaxed(reg->standard_axi_mode, 690b17336c5SHonghui Zhang base + REG_MMU_STANDARD_AXI_MODE); 691b17336c5SHonghui Zhang writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM); 692b17336c5SHonghui Zhang writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); 693b17336c5SHonghui Zhang writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL); 694b17336c5SHonghui Zhang writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR); 695b17336c5SHonghui Zhang return 0; 696b17336c5SHonghui Zhang } 697b17336c5SHonghui Zhang 698131bc8ebSJoerg Roedel static const struct dev_pm_ops mtk_iommu_pm_ops = { 699b17336c5SHonghui Zhang SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) 700b17336c5SHonghui Zhang }; 701b17336c5SHonghui Zhang 702b17336c5SHonghui Zhang static struct platform_driver mtk_iommu_driver = { 703b17336c5SHonghui Zhang .probe = mtk_iommu_probe, 704b17336c5SHonghui Zhang .remove = 
mtk_iommu_remove, 705b17336c5SHonghui Zhang .driver = { 706b17336c5SHonghui Zhang .name = "mtk-iommu", 707b17336c5SHonghui Zhang .of_match_table = mtk_iommu_of_ids, 708b17336c5SHonghui Zhang .pm = &mtk_iommu_pm_ops, 709b17336c5SHonghui Zhang } 710b17336c5SHonghui Zhang }; 711b17336c5SHonghui Zhang 712b17336c5SHonghui Zhang static int __init m4u_init(void) 713b17336c5SHonghui Zhang { 714b17336c5SHonghui Zhang return platform_driver_register(&mtk_iommu_driver); 715b17336c5SHonghui Zhang } 716b17336c5SHonghui Zhang 717b17336c5SHonghui Zhang static void __exit m4u_exit(void) 718b17336c5SHonghui Zhang { 719b17336c5SHonghui Zhang return platform_driver_unregister(&mtk_iommu_driver); 720b17336c5SHonghui Zhang } 721b17336c5SHonghui Zhang 722b17336c5SHonghui Zhang subsys_initcall(m4u_init); 723b17336c5SHonghui Zhang module_exit(m4u_exit); 724b17336c5SHonghui Zhang 725b17336c5SHonghui Zhang MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations"); 726b17336c5SHonghui Zhang MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>"); 727b17336c5SHonghui Zhang MODULE_LICENSE("GPL v2"); 728