/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/lmb.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been enabled
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION


#define NR_IOMMUS 2

/* IOC mmap registers */
#define IOC_Reg_Size 0x2000

#define IOC_IOPT_CacheInvd 0x908
#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul

#define IOC_IOST_Origin 0x918
#define IOC_IOST_Origin_E 0x8000000000000000ul
#define IOC_IOST_Origin_HW 0x0000000000000800ul
#define IOC_IOST_Origin_HL 0x0000000000000400ul

#define IOC_IO_ExcpStat 0x920
#define IOC_IO_ExcpStat_V 0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful

#define IOC_IO_ExcpMask 0x928
#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul

#define IOC_IOCmd_Offset 0x1000

#define IOC_IOCmd_Cfg 0xc00
#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul

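/* Note on the structures below: the IOC translates DMA addresses via a
 * two-level scheme - an I/O segment table (IOST) with one entry per 256MB
 * segment (see IO_SEGMENT_SHIFT), each entry pointing at an I/O page table
 * (IOPT) that holds the per-page entries.
 */
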
/* Segment table entries */
#define IOSTE_V 0x8000000000000000ul /* valid */
#define IOSTE_H 0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */

/* Page table entries */
#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
#define IOPTE_M 0x2000000000000000ul /* coherency required */
#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
#define IOPTE_H 0x0000000000000800ul /* cache hint */
#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */


/* IOMMU sizing */
#define IO_SEGMENT_SHIFT 28
#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET 0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};

/* Static array of iommus, one per node
 * each contains a list of windows, keyed from dma_window property
 * - on bus setup, look for a matching window, or create one
 * - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now.*/
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to setup the
	 * protection bit. "prot" is setup to be 3 fields of 4 bits appended
	 * together for each of the 3 supported direction values. It is then
	 * shifted left so that the fields matching the desired direction
	 * land on the appropriate bits, and other bits are masked out.
	 */
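	/* A sketch of the encoding (assuming the usual enum dma_data_direction
	 * values DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2):
	 * shifting 0xc48 left by 52, 56 or 60 bits leaves 0xc, 0x4 or 0x8 in
	 * the top nibble, so after masking with IOPTE_PP_W | IOPTE_PP_R the
	 * result is read+write, read-only or write-only respectively.
	 */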
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
#else
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
		(window->ioid & IOPTE_IOID_Mask);
#endif
	if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
		base_pte &= ~IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{

	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
		| (window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR " page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}

static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}

static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
		unsigned long dbase, unsigned long dsize,
		unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
		 __func__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}

static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
		 iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
		 __func__, iommu->nid, iommu->stab, ptab,
		 n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K; break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M; break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}

static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
		 reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
		 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			  iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT);
	cell_iommu_enable_hardware(iommu);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase 0x%016lx\n", window->table.it_base);
	pr_debug("\toffset 0x%lx\n", window->table.it_offset);
	pr_debug("\tsize %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

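/* The dma_fixed_* ops below each pick one of two paths: when the requested
 * ordering matches what the fixed (linear) mapping was set up with, the
 * operation goes straight through dma_direct_ops; otherwise it falls back to
 * the dynamic iommu window, where tce_build_cell() applies the requested
 * ordering per mapping. (This only summarises the checks below.)
 */
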
/* A coherent allocation implies strong ordering */

static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	if (iommu_fixed_is_weak)
		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
					    size, dma_handle,
					    device_to_mask(dev), flag,
					    dev_to_node(dev));
	else
		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
						     flag);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	if (iommu_fixed_is_weak)
		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
				    dma_handle);
	else
		dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
}

static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_page(dev, page, offset, size,
					       direction, attrs);
	else
		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
				      offset, size, device_to_mask(dev),
				      direction, attrs);
}

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
					  attrs);
	else
		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
				 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
	else
		return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
				    device_to_mask(dev), direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
	else
		iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
			       attrs);
}

static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

struct dma_mapping_ops dma_iommu_fixed_ops = {
	.alloc_coherent = dma_fixed_alloc_coherent,
	.free_coherent = dma_fixed_free_coherent,
	.map_sg = dma_fixed_map_sg,
	.unmap_sg = dma_fixed_unmap_sg,
	.dma_supported = dma_fixed_dma_supported,
	.set_dma_mask = dma_set_mask_and_switch,
	.map_page = dma_fixed_map_page,
	.unmap_page = dma_fixed_unmap_page,
};

static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;

	/* Order is important here, these are not mutually exclusive */
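	/* (The branches overlap: a device using the per-device fixed ops will
	 * typically also see the global PCI ops set to dma_iommu_ops, so the
	 * fixed case has to be tested first.)
	 */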
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		archdata->dma_data = cell_get_iommu_table(dev);
	else if (get_pci_dma_ops() == &dma_direct_ops)
		archdata->dma_data = (void *)cell_dma_direct_offset;
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be issue with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}

/*
 * Fixed IOMMU mapping support
 *
 * This code adds support for setting up a fixed IOMMU mapping on certain
 * cell machines. For 64-bit devices this avoids the performance overhead of
 * mapping and unmapping pages at runtime. 32-bit devices are unable to use
 * the fixed mapping.
 *
 * The fixed mapping is established at boot, and maps all of physical memory
 * 1:1 into device space at some offset. On machines with < 30 GB of memory
 * we setup the fixed mapping immediately above the normal IOMMU window.
 *
 * For example a machine with 4GB of memory would end up with the normal
 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 * 3GB, plus any offset required by firmware. The firmware offset is encoded
 * in the "dma-ranges" property.
 *
 * On machines with 30GB or more of memory, we are unable to place the fixed
 * mapping above the normal IOMMU window as we would run out of address space.
 * Instead we move the normal IOMMU window to coincide with the hash page
 * table, this region does not need to be part of the fixed mapping as no
 * device should ever be DMA'ing to it. We then setup the fixed mapping
 * from 0 to 32GB.
 */

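/* In outline (see cell_dma_dev_setup_fixed() below): a device on the fixed
 * mapping gets archdata->dma_data set to its firmware dma-ranges offset plus
 * dma_iommu_fixed_base, and the direct DMA path then adds that constant to
 * the physical address of each buffer - no per-mapping page table updates
 * are needed.
 */
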
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	np = of_node_get(dev->archdata.of_node);
	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr   : naddr cells
	 * parent addr  : pna cells
	 * size         : nsize cells
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0) {
		dev_addr = of_read_number(ranges + best, naddr);
	} else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
	{
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}

static void cell_dma_dev_setup_fixed(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	archdata->dma_data = (void *)addr;

	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}

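/* Helper for the fixed mapping: with 16MB pages a 256MB segment only needs 16
 * PTEs, but cell_iommu_alloc_ptab() pads each segment's PTEs out to a full 4K
 * page (512 entries), hence the one-4K-page-per-segment stride below.
 */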
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
}

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
		struct device_node *np, unsigned long dbase, unsigned long dsize,
		unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M
		| (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = lmb_phys_mem_size();

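	/* 0x800000000 is 32GB: this is the cut-off referred to in the
	 * comments above - beyond it the fixed mapping plus the dynamic
	 * window no longer fit, so the dynamic window is moved over the
	 * hash page table instead.
	 */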
	if ((fbase + fsize) <= 0x800000000)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
		       "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
		       dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = 1;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

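	/* In both loops below the parent checks restrict the walk to nodes
	 * that sit directly under the device tree root (skip the root itself
	 * and anything nested deeper).
	 */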
	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);