/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/lmb.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_debug
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

/*
 * Pack two page-size shifts into the high-order bytes of the page size
 * argument passed to lv1_construct_virtual_address_space().
 */
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};
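/*
 * An illustrative layout, with hypothetical values: given 128MB of boot
 * (real mode) memory, map.rm = { .base = 0, .size = 0x8000000 }.  A
 * hot-plugged region that the HV placed at lpar address 0x700000000
 * would have base = 0x700000000 and offset = 0x700000000 - 0x8000000,
 * so that a Linux physical address phys >= rm.size translates to the
 * lpar address phys + offset.
 */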
/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 *
 * ps3 addresses:
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %lxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}
EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

/**
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: byte size of the constructed htab, returned to the caller
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the current HV virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	int result;

	DBG("%s:%d: map.vas_id = %lu\n", __func__, __LINE__, map.vas_id);

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		BUG_ON(result);
		result = lv1_destruct_virtual_address_space(map.vas_id);
		BUG_ON(result);
		map.vas_id = 0;
	}
}
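/*
 * Rough lifecycle of the routines above: ps3_mm_init() fills in the map
 * from the repository and creates the r1 region, early platform setup
 * calls ps3_mm_vas_create() to construct and select the HV virtual
 * address space, and ps3_mm_shutdown()/ps3_mm_vas_destroy() undo both
 * on the way down.
 */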
/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	unsigned long muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d: requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d: actual     %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d: difference %lxh (%luMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	DBG("%s:%d: r->base = %lxh\n", __func__, __LINE__, r->base);
	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	BUG_ON(!mem_init_done);

	start_addr = map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	lmb_add(start_addr, map.r1.size);
	lmb_analyze();

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}

core_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines */
/*============================================================================*/
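/*
 * Two dma region flavors are implemented below.  The "sb" routines
 * serve devices on the southbridge bus and map whole areas with
 * lv1_map_device_dma_region(); the "ioc0" routines serve the ioc0 io
 * controller and enter pages one at a time with lv1_put_iopte().  Both
 * flavors track their mappings with the dma_chunk list defined below.
 */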
/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev       %lu:%lu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len       %lxh\n", func, line, r->len);
	DBG("%s:%d: offset    %lxh\n", func, line, r->offset);
}

/**
 * struct dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev       %lu:%lu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr  %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len       %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset    %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr  %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len       %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
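/*
 * The lookups above and below classify a (page aligned) request against
 * each chunk as one of three cases:
 *
 *   below:  [req)
 *   chunk:        [c->bus_addr, c->bus_addr + c->len)
 *   above:                                            [req)
 *
 * A request falling entirely within a chunk is a hit; a partial overlap
 * would span chunks, which this simple manager treats as a bug.
 */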
static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len) {
				return c;
			} else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr)
			continue;
		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar)
			continue;
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
				__LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}
/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 * @iopte_flag: The IOPTE protection and coherency flags for the mapping.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	/* IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M, the only flag
	 * combination used by the callers in this file */
	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->lpar_addr,
		c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out,
	u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
		phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
			struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
			last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#lx\n", __func__,
		r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       iopte_flag);
		if (result) {
			printk(KERN_WARNING "%s:%d: lv1_put_iopte failed: "
				"%s\n", __func__, __LINE__,
				ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
			iopage, c->bus_addr + offset, c->lpar_addr + offset,
			r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	/* invalidate the ioptes written so far */
	for (iopage--; 0 <= iopage; iopage--) {
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}
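/*
 * Note on the ioc0 bus address allocator above: addresses are handed
 * out bump-style, each new chunk starting where the most recently
 * created chunk (the head of the list) ends.  Space freed from the
 * middle of the segment is not reused, and, as the FIXME notes,
 * overflow of the io segment is not checked.
 */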
/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;

	pr_info(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%d: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
		__LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&r->bus_addr);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
					 r->len,
					 r->page_size,
					 &r->bus_addr);
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
		r->len, r->page_size, r->bus_addr);
	return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}
/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 * @iopte_flag: The IOPTE protection and coherency flags for the mapping.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr  %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);

	DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
		virt_addr, len);
	DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
		phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* FIXME */
		BUG();
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#lx\n", __func__,
		virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#lx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus,
			1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}
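/*
 * The linear routines below treat ram as (up to) two lpar-contiguous
 * pieces: the boot region at lpar 0 and the hotplug region at
 * map.r1.base.  A worked example with hypothetical values: if
 * map.rm.size = 0x8000000 and a region has offset = 0x4000000 and
 * len = 0x8000000, the first 0x4000000 bytes come from the tail of the
 * boot region and the remaining 0x4000000 bytes from the start of r1.
 */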
/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len, tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
		BUG_ON(result);
	}

	return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long bus_addr, len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}
/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
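/*
 * Illustrative driver-side use of the interface exported above (a
 * sketch only, not taken from a real driver; dev, buf and len are
 * assumed context):
 *
 *	struct ps3_dma_region r;
 *	unsigned long bus_addr;
 *
 *	ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&r);
 *	ps3_dma_map(&r, (unsigned long)buf, len, &bus_addr,
 *		IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
 *	... program the device with bus_addr ...
 *	ps3_dma_unmap(&r, bus_addr, len);
 *	ps3_dma_region_free(&r);
 */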
/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}