// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULTI_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREFETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PREFETCH_TRANSACTION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the port ID within the larb arbiter
 * from mtk_m4u_id, which is built by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
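/*
 * Worked example (illustrative; MTK_M4U_ID lives in the dt-binding
 * headers and is assumed to pack the ID as ((larb) << 5) | (port)):
 *
 *   id = MTK_M4U_ID(2, 5) = (2 << 5) | 5 = 0x45
 *   MTK_M4U_TO_LARB(0x45) = (0x45 >> 5) & 0xf  = 2
 *   MTK_M4U_TO_PORT(0x45) = 0x45 & 0x1f        = 5
 *
 * The INT_ID registers above use a similar layout: for a fault ID of
 * 0x1a3, F_MMU_INT_ID_LARB_ID() yields (0x1a3 >> 7) & 0x7 = 3 and
 * F_MMU_INT_ID_PORT_ID() yields (0x1a3 >> 2) & 0x1f = 8.
 */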
struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * =====================
 *
 * 0       1G      2G      3G      4G      5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * ==============================
 *
 * 4G      5G      6G      7G      8G
 * |---E---|---B---|---C---|---D---|
 * +------------Memory-------------+
 *
 * Region 'A' (I/O) cannot be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, while for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
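/*
 * Worked example (illustrative): with enable_4GB set, a CPU PA of
 * 0x4000_0000 (region 'B') is installed in the page table with bit 32
 * set, i.e. as 0x1_4000_0000. mtk_iommu_iova_to_phys() below reverses
 * this: any output PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE
 * (0x1_4000_0000) has bit 32 cleared again before being returned to
 * the consumer.
 */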
static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be one or two M4U HWs, but we always expect them to be in
 * the same domain for performance.
 *
 * Always return the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the TLB flush-all has completed */
	}
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);
	}
}

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	struct iommu_domain *domain = &data->m4u_dom->domain;

	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	fault_port = F_MMU_INT_ID_PORT_ID(regval);

	fault_larb = data->plat_data->larbid_remap[fault_larb];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	size_t length = gather->end - gather->start;

	if (gather->start == ULONG_MAX)
		return;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
				       data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}
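/*
 * Illustrative device-tree usage (the port macro name is an example
 * from the dt-binding headers, not defined in this file): each client
 * references the M4U with exactly one cell per master, which
 * mtk_iommu_of_xlate() above turns into a fwspec ID:
 *
 *	display {
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 */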
static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.probe_device	= mtk_iommu_probe_device,
	.release_device	= mtk_iommu_release_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULTI_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREFETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PREFETCH_TRANSACTION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff; record bits [32:30] here.
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	if (data->plat_data->reset_axi)
		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
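/*
 * Worked example (illustrative) for the valid-PA-range setup above:
 * 4GB mode covers PAs 0x1_0000_0000..0x1_ffff_ffff, so bits [32:30]
 * run from 0b100 (4) at the start address to 0b111 (7) at the end
 * address, giving
 *
 *	F_MMU_VLD_PA_RNG(7, 4) = (7 << 8) | 4 = 0x704
 */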
static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory: the HW accesses this region on a translation fault */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current DRAM is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!data->plat_data->has_4gb_mode)
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (data->plat_data->has_bclk) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The ids are consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}
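/*
 * Descriptive note (not from the original code): the M4U's register
 * contents may be lost while it is powered down, so the resume path
 * below rewrites everything saved in mtk_iommu_suspend() and, if a
 * domain was attached, restores the page-table base. That last store
 * uses a non-relaxed writel() so it is ordered before any subsequent
 * translation traffic.
 */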
static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat = M4U_MT2712,
	.has_4gb_mode = true,
	.has_bclk = true,
	.has_vld_pa_rng = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat = M4U_MT8173,
	.has_4gb_mode = true,
	.has_bclk = true,
	.reset_axi = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat = M4U_MT8183,
	.reset_axi = true,
	.larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)