// SPDX-License-Identifier: GPL-2.0-or-later
/*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>	/* for proc_mckinley_root */
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}

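
/*
** Informational note on the consistency check below: each IO PDIR
** entry is stored little-endian, so byte [7] of an entry holds bits
** 63..56 -- the Valid bit (bit 63) shows up as 0x80 there.
** sba_check_pdir() shifts that byte up to bit 31 and walks the
** current resource-map word through bit 31 one position per entry,
** so a set res_map bit paired with a clear Valid bit (or vice versa)
** is flagged as an inconsistency.
*/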

/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state are consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
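
/*
** Worked example for the mask/index helpers above (assumes 64-bit
** BITS_PER_LONG; values are illustrative):
**   RESMAP_MASK(4)       == 0xF000000000000000UL   (top 4 bits set)
**   RESMAP_MASK(4) >> 8  == 0x00F0000000000000UL
** i.e. bitshiftcnt counts bit positions starting at the most
** significant bit of a word, and ptr_to_pide() yields
** (byte offset into res_map * 8) + bitshiftcnt as the pdir index.
*/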

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
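
/*
** Example of the alignment optimization above (illustrative, assumes
** 4KB pages): a request for bits_wanted == 3 gives
**   o = 1 << get_order(3 << 12) = 4
** so the search only starts at bitshiftcnt values that are multiples
** of 4. The allocated range is therefore aligned to its size rounded
** up to a power of 2, which is exactly what the PCOM purge in
** sba_mark_invalid() requires of the IOVA range it invalidates.
*/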

/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those
 * bits in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}
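
/*
** Worked example for sba_free_range() (illustrative, assumes 64-bit
** words and 4KB IO pages): freeing a 2-page mapping whose pide is 5:
**   ridx = 5 >> 3 = 0                       -> word 0 of res_map
**   m    = RESMAP_MASK(2) >> (5 & 63)
**        = 0xC000000000000000UL >> 5 = 0x0600000000000000UL
** so exactly the two bits set by the matching sba_alloc_range()
** call are cleared again.
*/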

/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|         U           |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, but supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|          U             |          PPN[39:12]           | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 *  VI == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	asm_io_fdc(pdir_ptr);
}
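
/*
** Note (follows from the cpu_to_le64() above): the entry is stored
** little-endian, so on this big-endian CPU byte [7] of the in-memory
** entry holds bits 63..56. The Valid bit (bit 63) therefore appears
** as 0x80 in byte [7] -- which is why sba_mark_invalid() below clears
** ((u8 *) pdir_ptr)[7] and the ASSERT_PDIR_SANITY checks test that
** byte against 0x80/0x00.
*/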

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			asm_io_fdc(pdir_ptr);
			if (ioc_needs_fdc) {
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	asm_io_fdc(pdir_ptr);

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
	 * first, then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
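
#if 0
/*
** Illustrative only (not part of this driver): a typical PCI driver
** probe path that ends up in sba_dma_supported() above. With this
** IOMMU a 64-bit mask is rejected, so the driver falls back to the
** 32-bit mask. "pdev" and "rc" are hypothetical locals.
*/
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0)
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
#endif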

/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
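
/*
** Worked example for the offset/rounding logic above (illustrative,
** assumes IOVP_SHIFT == 12, i.e. 4KB IO pages): mapping an addr with
** low bits 0x234 and size 0x2100:
**   offset = 0x234
**   size   = (0x2100 + 0x234 + 0xfff) & ~0xfff = 0x3000  (3 pdir entries)
** and the returned IOVA is (pide << 12) | 0x234.
*/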

static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction,
	     unsigned long attrs)
{
	return sba_map_single(dev, page_address(page) + offset, size,
			direction);
}


/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
	       enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}
	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use a IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	asm_io_sync();

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}


/**
 * sba_alloc - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
	 dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(hwdev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
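
/*
** How PIDE_FLAG is used (see iommu-helpers.h; summarized here as an
** aid to the reader): during the coalesce pass the helpers stash
** PIDE_FLAG | (pide << IOVP_SHIFT) | dma_offset in sg_dma_address()
** of the head entry of each coalesced chunk, so a pdir index of 0 is
** still distinguishable from "no mapping". The fill pass then strips
** the flag and programs the pdir entries.
*/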

/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
				direction, 0);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static const struct dma_map_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc =		sba_alloc,
	.free =			sba_free,
	.map_page =		sba_map_page,
	.unmap_page =		sba_unmap_page,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
};


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL	/* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL	/* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)	{
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}
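	/*
	** Worked example of the PIRANHA_ADDR check used below
	** (addresses are illustrative): a pdir whose last byte sits at
	** physical 0x00060000 has bits 17 and 18 set but bit 20 clear,
	** so (addr & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL and the
	** allocation must be redone; 0x00160000 (bit 20 also set)
	** would pass unharmed.
	*/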
	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}
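
/*
** Note on the rope arithmetic above: bits [16:13] of the LBA's HPA
** encode its rope number (0-15), i.e. the per-rope register windows
** are 8KB apart. Ropes 0-7 hang off IOC 0 and ropes 8-15 off IOC 1,
** hence the "rope_num >> 3 == ioc_num" test.
*/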
/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc = ioc,
		.ioc_num = ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA. Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /*SBA_AGP_SUPPORT*/
}

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bit "IOVA" space, the top 2 bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}
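	/*
	** Sizing example (illustrative, assumes 4KB pages and one IOC):
	** with 1GB of RAM, totalram_pages() == 262144 == 1 << (30 - 12),
	** which is within the clamp range. Then below:
	**   iov_order       = get_order(262144 << 12) = 18
	**   iova_space_size = 1 << (18 + 12)          = 1GB
	**   pdir_size       = (1GB / 4KB) * 8         = 2MB
	** i.e. exactly the 2MB pdir case the Piranha workaround trims.
	*/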
	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	/* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}



/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** USB will HPMC the box. USB is only enabled if a
		** keyboard is present and found.
		**
		** With serial console, j6k v5.0 firmware says:
		**    mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
		**
		** FIXME: Using GFX+USB console at power up but direct
		**	linux to serial console is still broken.
		**	USB could generate DMA so we must reset USB.
		**	The proper sequence would be:
		**	o block console output
		**	o reset USB device
		**	o reprogram serial port
		**	o unblock console output
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}


#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	** Need to deal with DMA from LAN.
	**	Maybe use page zero boot device as a handle to talk
	**	to PDC about which device to shutdown.
	**
	** Netbooting, j6k v5.0 firmware says:
	**	mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
	** ARGH! invalid class.
	*/
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
			pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
				i,
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif
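		/*
		** Index arithmetic for the block below (matches the
		** idx_start/idx_end computations; shown for a 2MB pdir):
		** pdir bytes +1408K..+1536K correspond to pdir entries
		** 180224..196608, i.e. res_map byte indexes 22528..24576,
		** and each of those words is set to -1 (all busy).
		*/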
		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		   sba_dev->name,
		   (sba_dev->hw_rev & 0x7) + 1,
		   (sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		   (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		   total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		   ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i=0; i<4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			   i,
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		   total_pages - ioc->used_pages, ioc->used_pages,
		   (int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		   min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->msingle_calls, ioc->msingle_pages,
		   (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		   min, max, (int)((max * 1000)/min));
#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
	 * set (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		(int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i = 0; i < 4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
		min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg()    : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg()  : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */

	seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');

	return 0;
}
#endif /* CONFIG_PROC_FS */

static const struct parisc_device_id sba_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver __refdata = {
	.name = MODULE_NAME,
	.id_table = sba_tbl,
	.probe = sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (nonzero).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int __init sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[] = "Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[] = "Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}
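	/*
	** Note (illustrative): Astro and Pluto host one IOC per SBA (per
	** the comment below); the doubling assumes every other SBA class
	** here (Ike/REO bus converters) hosts two.  count_parisc_driver()
	** counts the SBA devices that matched this driver, so the product
	** is the system-wide IOC count.
	*/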
	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA; everything else has two */
		if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	proc_create_single("sba_iommu", 0, root, sba_proc_info);
	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
#endif
	return 0;
}

/*
** One-time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}


/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc PCI host bus adapter device.
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC #: 8 ropes per IOC */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc PCI host bus adapter device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}
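/*
** Worked example for sba_directed_lmmio() above (illustrative register
** values, not taken from real hardware): if DIRECT0_BASE reads
** 0xfa000001, bit 0 says the range is enabled and the base is
** 0xfa000000 (OR'd with PCI_F_EXTEND to form the full address).  If
** DIRECT0_ROUTE & (ROPES_PER_IOC-1) matches this rope and DIRECT0_MASK
** reads 0xffc00000, then size = ~mask = 0x003fffff and the returned
** range is the 4MB window 0xfa000000..0xfa3fffff.
*/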
/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc PCI host bus adapter device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return portion of distributed LMMIO
 * range. The distributed LMMIO is always present and it's just a question
 * of the base address and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}
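/*
** Rough usage sketch - illustrative only, loosely based on how the LBA
** PCI host bus adapter driver (lba_pci.c) consumes the helpers above;
** details are simplified:
**
**	struct resource r;
**	void *iommu = sba_get_iommu(pa_dev);	<- cached for DMA mapping
**
**	sba_directed_lmmio(pa_dev, &r);		<- directed range, if any
**	if (r.start == 0)
**		sba_distributed_lmmio(pa_dev, &r);  <- else take our slice
**
** Worked example for sba_distributed_lmmio(): if LMMIO_DIST_MASK reads
** 0xff000000, then ~mask = 0x00ffffff (a 16MB window) and
** size = 0x00ffffff / ROPES_PER_IOC (8) = 0x001fffff, so each rope is
** handed a 2MB slice: rope N starts at base + N * 0x200000.
*/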