// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
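/*
 * Example (illustrative, hypothetical address): with PG_ENT_SHIFT == 4 (v5.x)
 * a 36-bit physical section address such as 0x1_2340_0000 is stored in a
 * 32-bit page table entry as 0x123400000 >> 4 == 0x12340000, and
 * sect_to_phys() recovers it by shifting left by the same amount; with
 * PG_ENT_SHIFT == 0 (v1.x - v3.x) entries hold the physical address bits
 * unshifted.
 */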
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

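/*
 * Example for the version macros above (illustrative value): a
 * REG_MMU_VERSION read of 0x30600000 gives MMU_RAW_VER() == 0x183, which
 * MMU_MAJ_VER()/MMU_MIN_VER() decode as version 3.3, i.e. MAKE_MMU_VER(3, 3).
 */
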
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add; it contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

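/*
 * For example, a (hypothetical) master device sitting behind two SYSMMU
 * instances gets a single exynos_iommu_owner whose 'controllers' list holds
 * both sysmmu_drvdata entries; exynos_iommu_attach_device() and
 * exynos_iommu_detach_device() then walk that list so all controllers of the
 * master are programmed together.
 */
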
/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains a list of SYSMMU controllers from all master devices which have
 * been attached to this domain, as well as the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients;	/* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;		/* lv1 page table, 16KB */
	short *lv2entcnt;		/* free lv2 entry counter for each section */
	spinlock_t lock;		/* lock for modifying list of clients */
	spinlock_t pgtablelock;		/* lock for modifying page table @ pgtable */
	struct iommu_domain domain;	/* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}

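/*
 * Example for __sysmmu_tlb_invalidate_entry() above (hypothetical values):
 * a call with iova == 0x20010000 and num_inv == 16 issues 16 writes to
 * REG_MMU_FLUSH_ENTRY (0x20010000 .. 0x2001F000) on v1.x - v3.x hardware,
 * while v5.x uses a single range flush with FLUSH_START == 0x20010000 and
 * FLUSH_END == 0x2001F000.
 */
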
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

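/*
 * Example (illustrative): on v5.x hardware an interrupt with INT_STATUS bit 17
 * set is an "AW PAGE" fault; the handler below picks that entry from
 * sysmmu_v5_faults and reads the faulting virtual address from
 * REG_V5_FAULT_AW_VA.
 */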
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for a short time,
	 * while accessing the registers. For address translation during DMA
	 * transactions it relies on the client driver to enable it.
	 */
	clk_disable(data->clk_master);
}

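/*
 * For example, for versions above 3.2 the value programmed by
 * __sysmmu_init_config() above is CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN |
 * CFG_EAP, i.e. maximum QoS, first-level page descriptor (FLPD) caching,
 * automatic clock gating and access protection checks enabled.
 */
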
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is an 8-way set-associative TLB with 64 sets.
		 * A 1MB page can be cached in any of the sets, while a 64KB
		 * page can be cached in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}

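/*
 * Runtime PM note: each SYSMMU is linked to its master with a
 * DL_FLAG_PM_RUNTIME device link (see exynos_iommu_probe_device()), so it is
 * suspended and resumed together with the master. Nothing needs to be saved
 * explicitly on suspend; on resume __sysmmu_enable() reprograms the
 * configuration and the page table base from sysmmu_drvdata.
 */
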
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

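/*
 * Size note for exynos_iommu_domain_alloc() above: the order-2 allocation is
 * the 16KiB first-level table (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t) ==
 * 4096 * 4 bytes), and the order-1 allocation holds the 4096 16-bit lv2
 * entry counters (8KiB).
 */
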
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
				    sysmmu_pte_t *sent, sysmmu_iova_t iova,
				    short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

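/*
 * Each second-level table allocated in alloc_lv2entry() above is
 * LV2TABLE_SIZE == 1KiB (256 entries * 4 bytes) and must be naturally
 * aligned; that is why lv2table_kmem_cache is created with LV2TABLE_SIZE
 * alignment in exynos_iommu_init() and why the freshly allocated table is
 * checked against that alignment with BUG_ON().
 */
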
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
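/*
 * Note (generic IOMMU core behaviour): exynos_iommu_map() below only ever
 * sees the sizes advertised in exynos_iommu_ops.pgsize_bitmap, i.e. 4KiB,
 * 64KiB or 1MiB. A larger request such as an iommu_map() of 2MiB is split by
 * the IOMMU core into two 1MiB section mappings (provided the IOVA and the
 * physical address are suitably aligned), each handled by a single call here.
 */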
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

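/*
 * Note on the ZERO_LV2LINK write above (and in exynos_iommu_domain_alloc()):
 * instead of leaving an unmapped first-level slot as a plain fault entry, the
 * driver points it at the shared, all-fault zero_lv2_table. Per the FLPD
 * cache comments in alloc_lv2entry(), this keeps System MMU v3.3 from caching
 * a first-level fault entry that would keep faulting even after the slot is
 * later filled with a real mapping.
 */
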
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

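/*
 * exynos_iommu_of_xlate() above runs once per phandle in the master's
 * "iommus" property; for example a (hypothetical) node with
 * "iommus = <&sysmmu_a>, <&sysmmu_b>;" ends up with both controllers on
 * owner->controllers, and both get a device link to the master in
 * exynos_iommu_probe_device().
 */
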
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
		       __func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
		       __func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);