// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* Stop stalling; re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
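
/*
 * For illustration: each set bit n in the bitmap above advertises support for
 * a page size of (1 << n) bytes.  0x007ff000 has bits 12..22 set, i.e. every
 * power of two from 4 KiB (1 << 12) up to 4 MiB (1 << 22).  The IOMMU core
 * therefore hands rk_iommu_map() aligned chunks no larger than 4 MiB, which is
 * exactly what one page table (1024 * 4 KiB PTEs) can describe.  The numbers
 * here are a worked example only, not additional driver logic.
 */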

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

/* list of clocks required by IOMMU */
static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
};

static struct device *dma_dev;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entries */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)
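
/*
 * Worked example (illustrative values only): rk_mk_pte(0x12345000,
 * IOMMU_READ | IOMMU_WRITE) below yields 0x12345007:
 *   0x12345000  page address (bits 31:12)
 *   0x00000004  RK_PTE_PAGE_WRITABLE
 *   0x00000002  RK_PTE_PAGE_READABLE
 *   0x00000001  RK_PTE_PAGE_VALID
 * Decoding reverses this: pte & RK_PTE_PAGE_ADDRESS_MASK recovers 0x12345000
 * and pte & RK_PTE_PAGE_FLAGS_MASK recovers 0x006.
 */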

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
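
/*
 * Worked example (illustrative iova only): for iova 0x12345678 the helpers
 * above decompose as
 *   rk_iova_dte_index()   = 0x048  (bits 31:22)
 *   rk_iova_pte_index()   = 0x345  (bits 21:12)
 *   rk_iova_page_offset() = 0x678  (bits 11:0)
 * so the walk is dt[0x048] -> pt, pt[0x345] -> page, page + 0x678.
 */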

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}
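
/*
 * Note on the sanity check in rk_iommu_force_reset() below: only bits 31:12 of
 * RK_MMU_DTE_ADDR hold the DT address, so writing DTE_ADDR_DUMMY (0xCAFEBABE)
 * is expected to read back as DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK, i.e.
 * 0xCAFEB000 - only the upper five nybbles survive.  The value itself is an
 * arbitrary marker, not an address the hardware ever walks.
 */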

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}
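
/*
 * Interrupt handling, summarized from the handler below: for every MMU
 * instance whose RK_MMU_INT_STATUS is non-zero the handler reads the faulting
 * IOVA from RK_MMU_PAGE_FAULT_ADDR, logs the page-table walk via log_iova(),
 * reports the fault to any handler installed on the attached domain, then
 * issues ZAP_CACHE and PAGE_FAULT_DONE so the master can retry.  Bus errors
 * are only logged.  All serviced bits are acknowledged through
 * RK_MMU_INT_CLEAR.
 */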

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (WARN_ON_ONCE(err <= 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					  IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap
			 * cache and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev,
						   iova, flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}
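
/*
 * The software walk in rk_iommu_iova_to_phys() above mirrors what the hardware
 * does on an IOTLB miss.  The zap helpers below flush stale IOTLB entries;
 * IOMMUs that are currently powered down can be skipped safely because
 * rk_iommu_enable() (used on resume and attach) reloads RK_MMU_DTE_ADDR and
 * issues RK_MMU_CMD_ZAP_CACHE before paging is re-enabled.
 */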

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shoot down these iovas from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}
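
/*
 * A note on the allocation above: rk_dte_get_page_table() runs with dt_lock
 * held, hence GFP_ATOMIC, and the new page table must have a 32-bit bus
 * address because a DTE only stores address bits 31:12, hence GFP_DMA32.
 * Both the freshly zeroed PT and the updated DTE are pushed out with
 * rk_table_flush() because the CPU writes the tables but only the IOMMU
 * reads them.
 */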

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}
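
/*
 * Worked example (hypothetical values): iommu_map(domain, 0x10000000, paddr,
 * SZ_64K, prot) reaches rk_iommu_map() above as a single call because 64 KiB
 * is one of the sizes advertised in RK_IOMMU_PGSIZE_BITMAP.  It resolves to
 * dte index 0x040 and pte index 0x000, writes 16 consecutive PTEs in that one
 * page table, flushes them, and then zaps only iova 0x10000000 and 0x1000f000
 * - the first and last pages - as explained in rk_iommu_map_iova().
 */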

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev->archdata.iommu;

	return data ? data->iommu : NULL;
}

/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	/* Ignore error while disabling, just keep going */
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}
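
/*
 * rk_iommu_enable() below programs each MMU instance in a fixed order: stall
 * the MMU, force-reset it (unless "rockchip,disable-mmu-reset" is set), load
 * RK_MMU_DTE_ADDR with the domain's DT, zap the IOTLB, unmask the page-fault
 * and bus-error interrupts, enable paging and finally release the stall.
 * rk_iommu_disable() above is the mirror image used on detach and suspend.
 */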

/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	/* iommu already detached */
	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	/* iommu already attached */
	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		return NULL;

	/*
	 * rk32xx iommus use a 2-level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
}
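
/*
 * Sizing note for the two routines above: a domain always owns exactly one
 * 4 KiB DT page; page tables are allocated lazily on the first map into each
 * 4 MiB region.  A fully populated 4 GiB aperture therefore costs at most
 * 1024 PTs * 4 KiB = 4 MiB of page-table memory on top of the DT, and
 * rk_iommu_domain_free() walks all 1024 DTEs to return whatever was actually
 * allocated.
 */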

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	struct rk_iommudata *data;

	data = dev->archdata.iommu;
	if (!data)
		return -ENODEV;

	iommu = rk_iommu_from_dev(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);
	iommu_group_put(group);

	iommu_device_link(&iommu->iommu, dev);
	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return 0;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommudata *data = dev->archdata.iommu;

	iommu = rk_iommu_from_dev(dev);

	device_link_del(data->link);
	iommu_device_unlink(&iommu->iommu, dev);
	iommu_group_remove_device(dev);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev->archdata.iommu = data;

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
};
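
/*
 * Probe expectations, summarized from the code below: each IORESOURCE_MEM of
 * the platform device is mapped as one MMU instance sharing the same IRQ
 * handler, the "aclk"/"iface" clocks are treated as optional to keep old
 * devicetrees working, "rockchip,disable-mmu-reset" suppresses the
 * force-reset step, and the first registered IOMMU device is remembered in
 * dma_dev and used for the domains' page-table allocations and DMA syncs.
 */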

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

	/*
	 * iommu clocks should be present for all new devices and devicetrees
	 * but there are older devicetrees without clocks out in the wild.
	 * So treat clocks as optional for the time being.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);

	err = iommu_device_register(&iommu->iommu);
	if (err)
		goto err_remove_sysfs;

	/*
	 * Use the first registered IOMMU device for the domain to use with
	 * the DMA API, since a domain might not physically correspond to a
	 * single IOMMU device.
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i = 0, irq;

	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
		devm_free_irq(iommu->dev, irq, iommu);

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);