// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL_GEN2			0x02c
#define REG_MMU_INV_SEL_GEN1			0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_MISC_CTRL			0x048
#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS				0x050
#define REG_MMU_WR_LEN_CTRL			0x054
#define F_MMU_WR_THROT_DIS_MASK			(BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_INVAL_VA_31_12_MASK		GENMASK(31, 12)
#define F_MMU_INVAL_VA_34_32_MASK		GENMASK(11, 9)
#define F_MMU_INVAL_PA_34_32_MASK		GENMASK(8, 6)
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
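
/*
 * Illustrative decode of an INT_ID value (example number, not from a
 * datasheet): for regval = 0x2a4 on a platform without a sub-common,
 * F_MMU_INT_ID_LARB_ID(0x2a4) = (0x2a4 >> 7) & 0x7 = 5 and
 * F_MMU_INT_ID_PORT_ID(0x2a4) = (0x2a4 >> 2) & 0x1f = 9, i.e. the fault
 * came from larb 5, port 9 (before any larbid_remap translation).
 */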

#define MTK_PROTECT_PA_ALIGN			256

#define HAS_4GB_MODE			BIT(0)
/* The HW uses the EMI clock if there is no "bclk". */
#define HAS_BCLK			BIT(1)
#define HAS_VLD_PA_RNG			BIT(2)
#define RESET_AXI			BIT(3)
#define OUT_ORDER_WR_EN			BIT(4)
#define HAS_SUB_COMM			BIT(5)
#define WR_THROT_EN			BIT(6)
#define HAS_LEGACY_IVRP_PADDR		BIT(7)
#define IOVA_34_EN			BIT(8)

#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
		((((pdata)->flags) & (_x)) == (_x))

struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct mtk_iommu_data		*data;
	struct iommu_domain		domain;
};

static const struct iommu_ops mtk_iommu_ops;

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);

#define MTK_IOMMU_TLB_ADDR(iova) ({					\
	dma_addr_t _addr = iova;					\
	((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
})

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 *                                 4G      5G     6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, while for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
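
/*
 * Worked example of the remap above (illustrative addresses): a buffer at
 * CPU PA 0x4000_0000 (region 'B') is mapped with bit 32 set, so the M4U
 * outputs PA 0x1_4000_0000. On the reverse path, iova_to_phys() clears
 * bit 32 again for any PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE
 * (0x1_4000_0000), handing 0x4000_0000 back to the consumer.
 */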

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

struct mtk_iommu_iova_region {
	dma_addr_t		iova_base;
	unsigned long long	size;
};

static const struct mtk_iommu_iova_region single_domain[] = {
	{.iova_base = 0,	.size = SZ_4G},
};

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance reasons.
 *
 * Always return the mtk_iommu_data of the first probed M4U, where the iommu
 * domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	for_each_m4u(data) {
		if (pm_runtime_get_if_in_use(data->dev) <= 0)
			continue;

		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */

		pm_runtime_put(data->dev);
	}
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule,
					   struct mtk_iommu_data *data)
{
	bool has_pm = !!data->dev->pm_domain;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		if (has_pm) {
			if (pm_runtime_get_if_in_use(data->dev) <= 0)
				continue;
		}

		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);

		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
			       data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(data);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);

		if (has_pm)
			pm_runtime_put(data->dev);
	}
}
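
/*
 * Worked example for MTK_IOMMU_TLB_ADDR() as used above (illustrative
 * IOVA, assuming a 34-bit IOVA platform): for iova = 0x3_4567_8000 the
 * macro yields (0x45678000 & GENMASK(31, 12)) | 0x3 = 0x45678003, i.e.
 * the low bits of the range registers carry IOVA bits [33:32] while
 * bits [31:12] hold the 4K-aligned address.
 */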

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	unsigned int fault_larb, fault_port, sub_comm = 0;
	u32 int_state, regval, va34_32, pa34_32;
	u64 fault_iova, fault_pa;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
		va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
		pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
		fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
		fault_iova |= (u64)va34_32 << 32;
		fault_pa |= (u64)pa34_32 << 32;
	}

	fault_port = F_MMU_INT_ID_PORT_ID(regval);
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
		fault_larb = F_MMU_INT_ID_COMM_ID(regval);
		sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
	} else {
		fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	}
	fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static int mtk_iommu_get_domain_id(struct device *dev,
				   const struct mtk_iommu_plat_data *plat_data)
{
	const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
	const struct bus_dma_region *dma_rgn = dev->dma_range_map;
	int i, candidate = -1;
	dma_addr_t dma_end;

	if (!dma_rgn || plat_data->iova_region_nr == 1)
		return 0;

	dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
	for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
		/* Best fit. */
		if (dma_rgn->dma_start == rgn->iova_base &&
		    dma_end == rgn->iova_base + rgn->size - 1)
			return i;
		/* ok if it is inside this region. */
		if (dma_rgn->dma_start >= rgn->iova_base &&
		    dma_end < rgn->iova_base + rgn->size)
			candidate = i;
	}

	if (candidate >= 0)
		return candidate;
	dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
		&dma_rgn->dma_start, dma_rgn->size);
	return -EINVAL;
}
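
/*
 * Illustrative (hypothetical) device-tree fragment for the lookup above:
 * a consumer behind a bus node carrying
 *	dma-ranges = <0x1 0x0 0x1 0x0 0x0 0x40000000>;
 * gets a dma_range_map starting at 0x1_0000_0000 with a 1 GiB size, so it
 * is matched against the iova_region entry covering that window rather
 * than defaulting to region 0.
 */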

static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
			     bool enable, unsigned int domid)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	const struct mtk_iommu_iova_region *region;
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);

		larb_mmu = &data->larb_imu[larbid];

		region = data->plat_data->iova_region + domid;
		larb_mmu->bank[portid] = upper_32_bits(region->iova_base);

		dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
			enable ? "enable" : "disable", dev_name(larb_mmu->dev),
			portid, domid, larb_mmu->bank[portid]);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
				     struct mtk_iommu_data *data,
				     unsigned int domid)
{
	const struct mtk_iommu_iova_region *region;

	/* Reuse the existing domain as there is only one pgtable here. */
	if (data->m4u_dom) {
		dom->iop = data->m4u_dom->iop;
		dom->cfg = data->m4u_dom->cfg;
		dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
		goto update_iova_region;
	}

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
		.iommu_dev = data->dev,
	};

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
		dom->cfg.oas = data->enable_4GB ? 33 : 32;
	else
		dom->cfg.oas = 35;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

update_iova_region:
	/* Update the iova region for this domain */
	region = data->plat_data->iova_region + domid;
	dom->domain.geometry.aperture_start = region->iova_base;
	dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
	dom->domain.geometry.force_aperture = true;
	return 0;
}
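
/*
 * Note on the oas (output address size) choice above, as read from this
 * driver: platforms with HAS_4GB_MODE output at most 33-bit PAs when the
 * 4GB remap is active (the 4G-8G window in the diagram near the top of
 * this file) and 32-bit PAs otherwise, while the newer platforms use the
 * 35-bit PA extension of the MediaTek ARM v7s pgtable format.
 */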

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct device *m4udev;
	int ret, domid;

	if (!data)
		return -ENODEV;

	m4udev = data->dev;
	domid = mtk_iommu_get_domain_id(dev, data->plat_data);
	if (domid < 0)
		return domid;

	if (!dom->data) {
		if (mtk_iommu_domain_finalise(dom, data, domid))
			return -ENODEV;
		dom->data = data;
	}

	if (!data->m4u_dom) { /* Initialize the M4U HW */
		ret = pm_runtime_resume_and_get(m4udev);
		if (ret < 0)
			return ret;

		ret = mtk_iommu_hw_init(data);
		if (ret) {
			pm_runtime_put(m4udev);
			return ret;
		}
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);

		pm_runtime_put(m4udev);
	}

	mtk_iommu_config(data, dev, true, domid);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	if (!data)
		return;

	mtk_iommu_config(data, dev, false, 0);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
	if (dom->data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_all(dom->data);
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	size_t length = gather->end - gather->start + 1;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
				       dom->data);
}

static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}
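
/*
 * Sketch of the deferred-invalidate contract implemented above (core
 * iommu behaviour, summarized here for reference): a caller unmapping
 * two adjacent 4K pages sees the gather window grow to cover both, and
 * only the final iotlb_sync() issues a single range invalidation:
 *
 *	iommu_unmap() -> mtk_iommu_unmap(iova, 4K)    gather: iova..iova+4K-1
 *	              -> mtk_iommu_unmap(iova+4K, 4K) gather: iova..iova+8K-1
 *	              -> mtk_iommu_iotlb_sync()       one 8K flush_range_sync
 */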

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	struct iommu_group *group;
	int domid;

	if (!data)
		return ERR_PTR(-ENODEV);

	domid = mtk_iommu_get_domain_id(dev, data->plat_data);
	if (domid < 0)
		return ERR_PTR(domid);

	group = data->m4u_group[domid];
	if (!group) {
		group = iommu_group_alloc();
		if (!IS_ERR(group))
			data->m4u_group[domid] = group;
	} else {
		iommu_group_ref_get(group);
	}
	return group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}
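
/*
 * Illustrative (hypothetical node and port name) consumer binding handled
 * by the of_xlate above; each "iommus" entry carries exactly one cell,
 * which encodes the larb and port of the master:
 *
 *	display@14008000 {
 *		...
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 */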

static void mtk_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
	const struct mtk_iommu_iova_region *resv, *curdom;
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if ((int)domid < 0) /* Cast needed: domid is unsigned. */
		return;
	curdom = data->plat_data->iova_region + domid;
	for (i = 0; i < data->plat_data->iova_region_nr; i++) {
		resv = data->plat_data->iova_region + i;

		/* Only reserve when the region is inside the current domain */
		if (resv->iova_base <= curdom->iova_base ||
		    resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
			continue;

		region = iommu_alloc_resv_region(resv->iova_base, resv->size,
						 prot, IOMMU_RESV_RESERVED);
		if (!region)
			return;

		list_add_tail(&region->list, head);
	}
}

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iotlb_sync_map	= mtk_iommu_sync_map,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.probe_device	= mtk_iommu_probe_device,
	.release_device	= mtk_iommu_release_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.get_resv_regions = mtk_iommu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173) {
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	} else {
		regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
		regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	}
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB &&
	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bits [32:30].
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
		/* write command throttling mode */
		regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
		regval &= ~F_MMU_WR_THROT_DIS_MASK;
		writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
	}

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
		/* The register is called STANDARD_AXI_MODE in this case */
		regval = 0;
	} else {
		regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
		regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
	}
	writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
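
/*
 * Worked example for the IVRP_PADDR write above (illustrative
 * protect_base value): with protect_base = 0x1_4000_0100, the non-legacy
 * encoding is lower_32_bits | upper_32_bits = 0x40000100 | 0x1 =
 * 0x40000101, while a legacy platform in 4GB mode would instead write
 * (0x1_4000_0100 >> 1) | BIT(31). Likewise, F_MMU_VLD_PA_RNG(7, 4) =
 * (7 << 8) | 4 = 0x704, i.e. PA bits [32:30] of the window start
 * (4GB -> 0b100) and end (8GB - 1 -> 0b111).
 */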

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct device_node *larbnode, *smicomm_node;
	struct platform_device *plarbdev;
	struct device_link *link;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	struct regmap *infracfg;
	void *protect;
	int i, larb_nr, ret;
	u32 val;
	char *p;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protection memory. The HW accesses this region on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
		switch (data->plat_data->m4u_plat) {
		case M4U_MT2712:
			p = "mediatek,mt2712-infracfg";
			break;
		case M4U_MT8173:
			p = "mediatek,mt8173-infracfg";
			break;
		default:
			p = NULL;
		}

		infracfg = syscon_regmap_lookup_by_compatible(p);
		if (IS_ERR(infracfg))
			return PTR_ERR(infracfg);

		ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
		if (ret)
			return ret;
		data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The id is consecutive if this property is absent. */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	/* Get the smi-common dev from the last larb. */
	smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
	if (!smicomm_node)
		return -EINVAL;

	plarbdev = of_find_device_by_node(smicomm_node);
	of_node_put(smicomm_node);
	if (!plarbdev)
		return -EPROBE_DEFER;
	data->smicomm_dev = &plarbdev->dev;

	pm_runtime_enable(dev);

	link = device_link_add(data->smicomm_dev, dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
		ret = -EINVAL;
		goto out_runtime_disable;
	}

	platform_set_drvdata(pdev, data);

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		goto out_link_remove;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		goto out_sysfs_remove;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
		if (ret)
			goto out_list_del;
	}

	ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
	if (ret)
		goto out_bus_set_null;
	return ret;

out_bus_set_null:
	bus_set_iommu(&platform_bus_type, NULL);
out_list_del:
	list_del(&data->list);
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
out_link_remove:
	device_link_remove(data->smicomm_dev, dev);
out_runtime_disable:
	pm_runtime_disable(dev);
	return ret;
}
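
/*
 * Illustrative (hypothetical) device-tree shape consumed by the probe
 * above: the m4u node lists its local arbiters, and each larb points at
 * the smi-common it sits behind, which is how smicomm_dev is found via
 * the last larb:
 *
 *	iommu: m4u@10205000 {
 *		...
 *		mediatek,larbs = <&larb0 &larb1>;
 *	};
 *	larb1: larb@16010000 {
 *		...
 *		mediatek,smi = <&smi_common>;
 *	};
 */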

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	device_link_remove(data->smicomm_dev, &pdev->dev);
	pm_runtime_disable(&pdev->dev);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}
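
/*
 * The M4U registers are lost when its power domain is gated, so the
 * runtime resume below restores the registers saved at suspend time and
 * re-programs the page-table base. The restore is skipped until the first
 * attach has initialized the HW (see the m4u_dom check).
 */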
static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	/*
	 * Skip the restore on the first resume so that the default values of
	 * the registers below are not clobbered.
	 */
	if (!m4u_dom)
		return 0;
	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
	       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};
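
/*
 * Worked example for the larbid_remap tables below, matching the ISR's
 * fault_larb = larbid_remap[fault_larb][sub_comm] lookup: on mt6779
 * (which has HAS_SUB_COMM), a fault reporting COMM_ID 5 and SUB_COMM_ID 1
 * resolves to {7, 8}[1], i.e. local arbiter 8; platforms without a
 * sub-common only ever index column 0.
 */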
static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat       = M4U_MT2712,
	.flags          = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
	.iova_region    = single_domain,
	.iova_region_nr = ARRAY_SIZE(single_domain),
	.larbid_remap   = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};

static const struct mtk_iommu_plat_data mt6779_data = {
	.m4u_plat       = M4U_MT6779,
	.flags          = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
	.inv_sel_reg    = REG_MMU_INV_SEL_GEN2,
	.iova_region    = single_domain,
	.iova_region_nr = ARRAY_SIZE(single_domain),
	.larbid_remap   = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};

static const struct mtk_iommu_plat_data mt8167_data = {
	.m4u_plat       = M4U_MT8167,
	.flags          = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
	.iova_region    = single_domain,
	.iova_region_nr = ARRAY_SIZE(single_domain),
	.larbid_remap   = {{0}, {1}, {2}}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat       = M4U_MT8173,
	.flags          = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
			  HAS_LEGACY_IVRP_PADDR,
	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
	.iova_region    = single_domain,
	.iova_region_nr = ARRAY_SIZE(single_domain),
	.larbid_remap   = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat       = M4U_MT8183,
	.flags          = RESET_AXI,
	.inv_sel_reg    = REG_MMU_INV_SEL_GEN1,
	.iova_region    = single_domain,
	.iova_region_nr = ARRAY_SIZE(single_domain),
	.larbid_remap   = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
	{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)