1 /* 2 * Copyright (c) 2015-2016 MediaTek Inc. 3 * Author: Yong Wu <yong.wu@mediatek.com> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 */ 14 #include <linux/bug.h> 15 #include <linux/clk.h> 16 #include <linux/component.h> 17 #include <linux/device.h> 18 #include <linux/dma-iommu.h> 19 #include <linux/err.h> 20 #include <linux/interrupt.h> 21 #include <linux/io.h> 22 #include <linux/iommu.h> 23 #include <linux/iopoll.h> 24 #include <linux/list.h> 25 #include <linux/of_address.h> 26 #include <linux/of_iommu.h> 27 #include <linux/of_irq.h> 28 #include <linux/of_platform.h> 29 #include <linux/platform_device.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <asm/barrier.h> 33 #include <dt-bindings/memory/mt8173-larb-port.h> 34 #include <soc/mediatek/smi.h> 35 36 #include "io-pgtable.h" 37 38 #define REG_MMU_PT_BASE_ADDR 0x000 39 40 #define REG_MMU_INVALIDATE 0x020 41 #define F_ALL_INVLD 0x2 42 #define F_MMU_INV_RANGE 0x1 43 44 #define REG_MMU_INVLD_START_A 0x024 45 #define REG_MMU_INVLD_END_A 0x028 46 47 #define REG_MMU_INV_SEL 0x038 48 #define F_INVLD_EN0 BIT(0) 49 #define F_INVLD_EN1 BIT(1) 50 51 #define REG_MMU_STANDARD_AXI_MODE 0x048 52 #define REG_MMU_DCM_DIS 0x050 53 54 #define REG_MMU_CTRL_REG 0x110 55 #define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) 56 #define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5) 57 58 #define REG_MMU_IVRP_PADDR 0x114 59 #define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1) 60 61 #define REG_MMU_INT_CONTROL0 0x120 62 #define F_L2_MULIT_HIT_EN BIT(0) 63 #define F_TABLE_WALK_FAULT_INT_EN BIT(1) 64 #define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2) 65 #define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3) 66 #define F_PREFETCH_FIFO_ERR_INT_EN BIT(5) 67 #define F_MISS_FIFO_ERR_INT_EN BIT(6) 68 #define F_INT_CLR_BIT BIT(12) 69 70 #define REG_MMU_INT_MAIN_CONTROL 0x124 71 #define F_INT_TRANSLATION_FAULT BIT(0) 72 #define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) 73 #define F_INT_INVALID_PA_FAULT BIT(2) 74 #define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) 75 #define F_INT_TLB_MISS_FAULT BIT(4) 76 #define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5) 77 #define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6) 78 79 #define REG_MMU_CPE_DONE 0x12C 80 81 #define REG_MMU_FAULT_ST1 0x134 82 83 #define REG_MMU_FAULT_VA 0x13c 84 #define F_MMU_FAULT_VA_MSK 0xfffff000 85 #define F_MMU_FAULT_VA_WRITE_BIT BIT(1) 86 #define F_MMU_FAULT_VA_LAYER_BIT BIT(0) 87 88 #define REG_MMU_INVLD_PA 0x140 89 #define REG_MMU_INT_ID 0x150 90 #define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) 91 #define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) 92 93 #define MTK_PROTECT_PA_ALIGN 128 94 95 struct mtk_iommu_suspend_reg { 96 u32 standard_axi_mode; 97 u32 dcm_dis; 98 u32 ctrl_reg; 99 u32 int_control0; 100 u32 int_main_control; 101 }; 102 103 struct mtk_iommu_client_priv { 104 struct list_head client; 105 unsigned int mtk_m4u_id; 106 struct device *m4udev; 107 }; 108 109 struct mtk_iommu_domain { 110 spinlock_t pgtlock; /* lock for page table */ 111 112 struct io_pgtable_cfg cfg; 113 struct io_pgtable_ops *iop; 114 115 struct iommu_domain domain; 116 }; 117 118 struct mtk_iommu_data { 119 void __iomem *base; 120 int irq; 121 struct device *dev; 122 struct clk *bclk; 123 phys_addr_t protect_base; /* protect memory base */ 124 struct mtk_iommu_suspend_reg reg; 125 struct mtk_iommu_domain *m4u_dom; 126 struct iommu_group *m4u_group; 127 struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ 128 }; 129 130 static struct iommu_ops mtk_iommu_ops; 131 132 static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) 133 { 134 return container_of(dom, struct mtk_iommu_domain, domain); 135 } 136 137 static void mtk_iommu_tlb_flush_all(void *cookie) 138 { 139 struct mtk_iommu_data *data = cookie; 140 141 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); 142 writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); 143 wmb(); /* Make sure the tlb flush all done */ 144 } 145 146 static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, 147 size_t granule, bool leaf, 148 void *cookie) 149 { 150 struct mtk_iommu_data *data = cookie; 151 152 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); 153 154 writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); 155 writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); 156 writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); 157 } 158 159 static void mtk_iommu_tlb_sync(void *cookie) 160 { 161 struct mtk_iommu_data *data = cookie; 162 int ret; 163 u32 tmp; 164 165 ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, 166 tmp != 0, 10, 100000); 167 if (ret) { 168 dev_warn(data->dev, 169 "Partial TLB flush timed out, falling back to full flush\n"); 170 mtk_iommu_tlb_flush_all(cookie); 171 } 172 /* Clear the CPE status */ 173 writel_relaxed(0, data->base + REG_MMU_CPE_DONE); 174 } 175 176 static const struct iommu_gather_ops mtk_iommu_gather_ops = { 177 .tlb_flush_all = mtk_iommu_tlb_flush_all, 178 .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, 179 .tlb_sync = mtk_iommu_tlb_sync, 180 }; 181 182 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) 183 { 184 struct mtk_iommu_data *data = dev_id; 185 struct mtk_iommu_domain *dom = data->m4u_dom; 186 u32 int_state, regval, fault_iova, fault_pa; 187 unsigned int fault_larb, fault_port; 188 bool layer, write; 189 190 /* Read error info from registers */ 191 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); 192 fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); 193 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; 194 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; 195 fault_iova &= F_MMU_FAULT_VA_MSK; 196 fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); 197 regval = readl_relaxed(data->base + REG_MMU_INT_ID); 198 fault_larb = F_MMU0_INT_ID_LARB_ID(regval); 199 fault_port = F_MMU0_INT_ID_PORT_ID(regval); 200 201 if (report_iommu_fault(&dom->domain, data->dev, fault_iova, 202 write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { 203 dev_err_ratelimited( 204 data->dev, 205 "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n", 206 int_state, fault_iova, fault_pa, fault_larb, fault_port, 207 layer, write ? "write" : "read"); 208 } 209 210 /* Interrupt clear */ 211 regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0); 212 regval |= F_INT_CLR_BIT; 213 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0); 214 215 mtk_iommu_tlb_flush_all(data); 216 217 return IRQ_HANDLED; 218 } 219 220 static void mtk_iommu_config(struct mtk_iommu_data *data, 221 struct device *dev, bool enable) 222 { 223 struct mtk_iommu_client_priv *head, *cur, *next; 224 struct mtk_smi_larb_iommu *larb_mmu; 225 unsigned int larbid, portid; 226 227 head = dev->archdata.iommu; 228 list_for_each_entry_safe(cur, next, &head->client, client) { 229 larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id); 230 portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id); 231 larb_mmu = &data->smi_imu.larb_imu[larbid]; 232 233 dev_dbg(dev, "%s iommu port: %d\n", 234 enable ? "enable" : "disable", portid); 235 236 if (enable) 237 larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); 238 else 239 larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); 240 } 241 } 242 243 static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) 244 { 245 struct mtk_iommu_domain *dom = data->m4u_dom; 246 247 spin_lock_init(&dom->pgtlock); 248 249 dom->cfg = (struct io_pgtable_cfg) { 250 .quirks = IO_PGTABLE_QUIRK_ARM_NS | 251 IO_PGTABLE_QUIRK_NO_PERMS | 252 IO_PGTABLE_QUIRK_TLBI_ON_MAP, 253 .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, 254 .ias = 32, 255 .oas = 32, 256 .tlb = &mtk_iommu_gather_ops, 257 .iommu_dev = data->dev, 258 }; 259 260 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); 261 if (!dom->iop) { 262 dev_err(data->dev, "Failed to alloc io pgtable\n"); 263 return -EINVAL; 264 } 265 266 /* Update our support page sizes bitmap */ 267 mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap; 268 269 writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], 270 data->base + REG_MMU_PT_BASE_ADDR); 271 return 0; 272 } 273 274 static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) 275 { 276 struct mtk_iommu_domain *dom; 277 278 if (type != IOMMU_DOMAIN_DMA) 279 return NULL; 280 281 dom = kzalloc(sizeof(*dom), GFP_KERNEL); 282 if (!dom) 283 return NULL; 284 285 if (iommu_get_dma_cookie(&dom->domain)) { 286 kfree(dom); 287 return NULL; 288 } 289 290 dom->domain.geometry.aperture_start = 0; 291 dom->domain.geometry.aperture_end = DMA_BIT_MASK(32); 292 dom->domain.geometry.force_aperture = true; 293 294 return &dom->domain; 295 } 296 297 static void mtk_iommu_domain_free(struct iommu_domain *domain) 298 { 299 iommu_put_dma_cookie(domain); 300 kfree(to_mtk_domain(domain)); 301 } 302 303 static int mtk_iommu_attach_device(struct iommu_domain *domain, 304 struct device *dev) 305 { 306 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 307 struct mtk_iommu_client_priv *priv = dev->archdata.iommu; 308 struct mtk_iommu_data *data; 309 int ret; 310 311 if (!priv) 312 return -ENODEV; 313 314 data = dev_get_drvdata(priv->m4udev); 315 if (!data->m4u_dom) { 316 data->m4u_dom = dom; 317 ret = mtk_iommu_domain_finalise(data); 318 if (ret) { 319 data->m4u_dom = NULL; 320 return ret; 321 } 322 } else if (data->m4u_dom != dom) { 323 /* All the client devices should be in the same m4u domain */ 324 dev_err(dev, "try to attach into the error iommu domain\n"); 325 return -EPERM; 326 } 327 328 mtk_iommu_config(data, dev, true); 329 return 0; 330 } 331 332 static void mtk_iommu_detach_device(struct iommu_domain *domain, 333 struct device *dev) 334 { 335 struct mtk_iommu_client_priv *priv = dev->archdata.iommu; 336 struct mtk_iommu_data *data; 337 338 if (!priv) 339 return; 340 341 data = dev_get_drvdata(priv->m4udev); 342 mtk_iommu_config(data, dev, false); 343 } 344 345 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, 346 phys_addr_t paddr, size_t size, int prot) 347 { 348 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 349 unsigned long flags; 350 int ret; 351 352 spin_lock_irqsave(&dom->pgtlock, flags); 353 ret = dom->iop->map(dom->iop, iova, paddr, size, prot); 354 spin_unlock_irqrestore(&dom->pgtlock, flags); 355 356 return ret; 357 } 358 359 static size_t mtk_iommu_unmap(struct iommu_domain *domain, 360 unsigned long iova, size_t size) 361 { 362 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 363 unsigned long flags; 364 size_t unmapsz; 365 366 spin_lock_irqsave(&dom->pgtlock, flags); 367 unmapsz = dom->iop->unmap(dom->iop, iova, size); 368 spin_unlock_irqrestore(&dom->pgtlock, flags); 369 370 return unmapsz; 371 } 372 373 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, 374 dma_addr_t iova) 375 { 376 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 377 unsigned long flags; 378 phys_addr_t pa; 379 380 spin_lock_irqsave(&dom->pgtlock, flags); 381 pa = dom->iop->iova_to_phys(dom->iop, iova); 382 spin_unlock_irqrestore(&dom->pgtlock, flags); 383 384 return pa; 385 } 386 387 static int mtk_iommu_add_device(struct device *dev) 388 { 389 struct iommu_group *group; 390 391 if (!dev->archdata.iommu) /* Not a iommu client device */ 392 return -ENODEV; 393 394 group = iommu_group_get_for_dev(dev); 395 if (IS_ERR(group)) 396 return PTR_ERR(group); 397 398 iommu_group_put(group); 399 return 0; 400 } 401 402 static void mtk_iommu_remove_device(struct device *dev) 403 { 404 struct mtk_iommu_client_priv *head, *cur, *next; 405 406 head = dev->archdata.iommu; 407 if (!head) 408 return; 409 410 list_for_each_entry_safe(cur, next, &head->client, client) { 411 list_del(&cur->client); 412 kfree(cur); 413 } 414 kfree(head); 415 dev->archdata.iommu = NULL; 416 417 iommu_group_remove_device(dev); 418 } 419 420 static struct iommu_group *mtk_iommu_device_group(struct device *dev) 421 { 422 struct mtk_iommu_data *data; 423 struct mtk_iommu_client_priv *priv; 424 425 priv = dev->archdata.iommu; 426 if (!priv) 427 return ERR_PTR(-ENODEV); 428 429 /* All the client devices are in the same m4u iommu-group */ 430 data = dev_get_drvdata(priv->m4udev); 431 if (!data->m4u_group) { 432 data->m4u_group = iommu_group_alloc(); 433 if (IS_ERR(data->m4u_group)) 434 dev_err(dev, "Failed to allocate M4U IOMMU group\n"); 435 } 436 return data->m4u_group; 437 } 438 439 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) 440 { 441 struct mtk_iommu_client_priv *head, *priv, *next; 442 struct platform_device *m4updev; 443 444 if (args->args_count != 1) { 445 dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n", 446 args->args_count); 447 return -EINVAL; 448 } 449 450 if (!dev->archdata.iommu) { 451 /* Get the m4u device */ 452 m4updev = of_find_device_by_node(args->np); 453 of_node_put(args->np); 454 if (WARN_ON(!m4updev)) 455 return -EINVAL; 456 457 head = kzalloc(sizeof(*head), GFP_KERNEL); 458 if (!head) 459 return -ENOMEM; 460 461 dev->archdata.iommu = head; 462 INIT_LIST_HEAD(&head->client); 463 head->m4udev = &m4updev->dev; 464 } else { 465 head = dev->archdata.iommu; 466 } 467 468 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 469 if (!priv) 470 goto err_free_mem; 471 472 priv->mtk_m4u_id = args->args[0]; 473 list_add_tail(&priv->client, &head->client); 474 475 return 0; 476 477 err_free_mem: 478 list_for_each_entry_safe(priv, next, &head->client, client) 479 kfree(priv); 480 kfree(head); 481 dev->archdata.iommu = NULL; 482 return -ENOMEM; 483 } 484 485 static struct iommu_ops mtk_iommu_ops = { 486 .domain_alloc = mtk_iommu_domain_alloc, 487 .domain_free = mtk_iommu_domain_free, 488 .attach_dev = mtk_iommu_attach_device, 489 .detach_dev = mtk_iommu_detach_device, 490 .map = mtk_iommu_map, 491 .unmap = mtk_iommu_unmap, 492 .map_sg = default_iommu_map_sg, 493 .iova_to_phys = mtk_iommu_iova_to_phys, 494 .add_device = mtk_iommu_add_device, 495 .remove_device = mtk_iommu_remove_device, 496 .device_group = mtk_iommu_device_group, 497 .of_xlate = mtk_iommu_of_xlate, 498 .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, 499 }; 500 501 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) 502 { 503 u32 regval; 504 int ret; 505 506 ret = clk_prepare_enable(data->bclk); 507 if (ret) { 508 dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret); 509 return ret; 510 } 511 512 regval = F_MMU_PREFETCH_RT_REPLACE_MOD | 513 F_MMU_TF_PROTECT_SEL(2); 514 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); 515 516 regval = F_L2_MULIT_HIT_EN | 517 F_TABLE_WALK_FAULT_INT_EN | 518 F_PREETCH_FIFO_OVERFLOW_INT_EN | 519 F_MISS_FIFO_OVERFLOW_INT_EN | 520 F_PREFETCH_FIFO_ERR_INT_EN | 521 F_MISS_FIFO_ERR_INT_EN; 522 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0); 523 524 regval = F_INT_TRANSLATION_FAULT | 525 F_INT_MAIN_MULTI_HIT_FAULT | 526 F_INT_INVALID_PA_FAULT | 527 F_INT_ENTRY_REPLACEMENT_FAULT | 528 F_INT_TLB_MISS_FAULT | 529 F_INT_MISS_TRANSACTION_FIFO_FAULT | 530 F_INT_PRETETCH_TRANSATION_FIFO_FAULT; 531 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); 532 533 writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base), 534 data->base + REG_MMU_IVRP_PADDR); 535 536 writel_relaxed(0, data->base + REG_MMU_DCM_DIS); 537 writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); 538 539 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, 540 dev_name(data->dev), (void *)data)) { 541 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); 542 clk_disable_unprepare(data->bclk); 543 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq); 544 return -ENODEV; 545 } 546 547 return 0; 548 } 549 550 static int compare_of(struct device *dev, void *data) 551 { 552 return dev->of_node == data; 553 } 554 555 static int mtk_iommu_bind(struct device *dev) 556 { 557 struct mtk_iommu_data *data = dev_get_drvdata(dev); 558 559 return component_bind_all(dev, &data->smi_imu); 560 } 561 562 static void mtk_iommu_unbind(struct device *dev) 563 { 564 struct mtk_iommu_data *data = dev_get_drvdata(dev); 565 566 component_unbind_all(dev, &data->smi_imu); 567 } 568 569 static const struct component_master_ops mtk_iommu_com_ops = { 570 .bind = mtk_iommu_bind, 571 .unbind = mtk_iommu_unbind, 572 }; 573 574 static int mtk_iommu_probe(struct platform_device *pdev) 575 { 576 struct mtk_iommu_data *data; 577 struct device *dev = &pdev->dev; 578 struct resource *res; 579 struct component_match *match = NULL; 580 void *protect; 581 int i, larb_nr, ret; 582 583 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 584 if (!data) 585 return -ENOMEM; 586 data->dev = dev; 587 588 /* Protect memory. HW will access here while translation fault.*/ 589 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); 590 if (!protect) 591 return -ENOMEM; 592 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); 593 594 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 595 data->base = devm_ioremap_resource(dev, res); 596 if (IS_ERR(data->base)) 597 return PTR_ERR(data->base); 598 599 data->irq = platform_get_irq(pdev, 0); 600 if (data->irq < 0) 601 return data->irq; 602 603 data->bclk = devm_clk_get(dev, "bclk"); 604 if (IS_ERR(data->bclk)) 605 return PTR_ERR(data->bclk); 606 607 larb_nr = of_count_phandle_with_args(dev->of_node, 608 "mediatek,larbs", NULL); 609 if (larb_nr < 0) 610 return larb_nr; 611 data->smi_imu.larb_nr = larb_nr; 612 613 for (i = 0; i < larb_nr; i++) { 614 struct device_node *larbnode; 615 struct platform_device *plarbdev; 616 617 larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); 618 if (!larbnode) 619 return -EINVAL; 620 621 if (!of_device_is_available(larbnode)) 622 continue; 623 624 plarbdev = of_find_device_by_node(larbnode); 625 of_node_put(larbnode); 626 if (!plarbdev) { 627 plarbdev = of_platform_device_create( 628 larbnode, NULL, 629 platform_bus_type.dev_root); 630 if (!plarbdev) 631 return -EPROBE_DEFER; 632 } 633 data->smi_imu.larb_imu[i].dev = &plarbdev->dev; 634 635 component_match_add(dev, &match, compare_of, larbnode); 636 } 637 638 platform_set_drvdata(pdev, data); 639 640 ret = mtk_iommu_hw_init(data); 641 if (ret) 642 return ret; 643 644 if (!iommu_present(&platform_bus_type)) 645 bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); 646 647 return component_master_add_with_match(dev, &mtk_iommu_com_ops, match); 648 } 649 650 static int mtk_iommu_remove(struct platform_device *pdev) 651 { 652 struct mtk_iommu_data *data = platform_get_drvdata(pdev); 653 654 if (iommu_present(&platform_bus_type)) 655 bus_set_iommu(&platform_bus_type, NULL); 656 657 free_io_pgtable_ops(data->m4u_dom->iop); 658 clk_disable_unprepare(data->bclk); 659 devm_free_irq(&pdev->dev, data->irq, data); 660 component_master_del(&pdev->dev, &mtk_iommu_com_ops); 661 return 0; 662 } 663 664 static int __maybe_unused mtk_iommu_suspend(struct device *dev) 665 { 666 struct mtk_iommu_data *data = dev_get_drvdata(dev); 667 struct mtk_iommu_suspend_reg *reg = &data->reg; 668 void __iomem *base = data->base; 669 670 reg->standard_axi_mode = readl_relaxed(base + 671 REG_MMU_STANDARD_AXI_MODE); 672 reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); 673 reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); 674 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); 675 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); 676 return 0; 677 } 678 679 static int __maybe_unused mtk_iommu_resume(struct device *dev) 680 { 681 struct mtk_iommu_data *data = dev_get_drvdata(dev); 682 struct mtk_iommu_suspend_reg *reg = &data->reg; 683 void __iomem *base = data->base; 684 685 writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], 686 base + REG_MMU_PT_BASE_ADDR); 687 writel_relaxed(reg->standard_axi_mode, 688 base + REG_MMU_STANDARD_AXI_MODE); 689 writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); 690 writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); 691 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); 692 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); 693 writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base), 694 base + REG_MMU_IVRP_PADDR); 695 return 0; 696 } 697 698 const struct dev_pm_ops mtk_iommu_pm_ops = { 699 SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) 700 }; 701 702 static const struct of_device_id mtk_iommu_of_ids[] = { 703 { .compatible = "mediatek,mt8173-m4u", }, 704 {} 705 }; 706 707 static struct platform_driver mtk_iommu_driver = { 708 .probe = mtk_iommu_probe, 709 .remove = mtk_iommu_remove, 710 .driver = { 711 .name = "mtk-iommu", 712 .of_match_table = mtk_iommu_of_ids, 713 .pm = &mtk_iommu_pm_ops, 714 } 715 }; 716 717 static int mtk_iommu_init_fn(struct device_node *np) 718 { 719 int ret; 720 struct platform_device *pdev; 721 722 pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); 723 if (!pdev) 724 return -ENOMEM; 725 726 ret = platform_driver_register(&mtk_iommu_driver); 727 if (ret) { 728 pr_err("%s: Failed to register driver\n", __func__); 729 return ret; 730 } 731 732 of_iommu_set_ops(np, &mtk_iommu_ops); 733 return 0; 734 } 735 736 IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn); 737