// SPDX-License-Identifier: GPL-2.0-only
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
        USE_DYNAMIC_DMA = 1,
#else
        USE_DYNAMIC_DMA = 0,
#endif
};

enum {
        PAGE_SHIFT_4K = 12U,
        PAGE_SHIFT_64K = 16U,
        PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
        return (a << 56) | (b << 48);
}

enum {
        ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
        ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
        HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
        HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 * @destroy: flag if region should be destroyed upon shutdown
 */

struct mem_region {
        u64 base;
        u64 size;
        unsigned long offset;
        int destroy;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: highmem region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
        u64 total;
        u64 vas_id;
        u64 htab_size;
        struct mem_region rm;
        struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
        const char *func, int line)
{
        DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
        DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
        DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
        DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
        DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
        DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
        DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
        BUG_ON(is_kernel_addr(phys_addr));
        return (phys_addr < map.rm.size || phys_addr >= map.total)
                ? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long* htab_size)
{
        int result;
        u64 start_address;
        u64 size;
        u64 access_right;
        u64 max_page_size;
        u64 flags;

        result = lv1_query_logical_partition_address_region_info(0,
                &start_address, &size, &access_right, &max_page_size,
                &flags);

        if (result) {
                DBG("%s:%d: lv1_query_logical_partition_address_region_info "
                        "failed: %s\n", __func__, __LINE__,
                        ps3_result(result));
                goto fail;
        }

        if (max_page_size < PAGE_SHIFT_16M) {
                DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
                        max_page_size);
                goto fail;
        }

        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

        result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
                2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
                &map.vas_id, &map.htab_size);

        if (result) {
                DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        result = lv1_select_virtual_address_space(map.vas_id);

        if (result) {
                DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        *htab_size = map.htab_size;

        debug_dump_map(&map);

        return;

fail:
        panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
        int result;

        if (map.vas_id) {
                result = lv1_select_virtual_address_space(0);
                result += lv1_destruct_virtual_address_space(map.vas_id);

                if (result) {
                        lv1_panic(0);
                }

                map.vas_id = 0;
        }
}

static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
        int result;

        /* Assume a single highmem region. */

        result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

        if (result)
                goto zero_region;

        if (!r->base || !r->size) {
                result = -1;
                goto zero_region;
        }

        r->offset = r->base - map.rm.size;

        DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
            __func__, __LINE__, r->base, r->size);

        return 0;

zero_region:
        DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

        r->size = r->base = r->offset = 0;
        return result;
}

static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
        /* Assume a single highmem region. */

        return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
                ps3_repository_write_highmem_info(0, 0, 0);
}

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
        int result;
        u64 muid;

        r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

        DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
        DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
        DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
                size - r->size, (size - r->size) / 1024 / 1024);

        if (r->size == 0) {
                DBG("%s:%d: size == 0\n", __func__, __LINE__);
                result = -1;
                goto zero_region;
        }

        result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
                ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

        if (result || r->base < map.rm.size) {
                DBG("%s:%d: lv1_allocate_memory failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto zero_region;
        }

        r->destroy = 1;
        r->offset = r->base - map.rm.size;
        return result;

zero_region:
        r->size = r->base = r->offset = 0;
        return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
        int result;

        if (!r->destroy) {
                return;
        }

        if (r->base) {
                result = lv1_release_memory(r->base);

                if (result) {
                        lv1_panic(0);
                }

                r->size = r->base = r->offset = 0;
                map.total = map.rm.size;
        }

        ps3_mm_set_repository_highmem(NULL);
}

/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
        unsigned long lpar_addr)
{
        if (lpar_addr >= map.rm.size)
                lpar_addr -= map.r1.offset;
        BUG_ON(lpar_addr < r->offset);
        BUG_ON(lpar_addr >= r->offset + r->len);
        return r->bus_addr + lpar_addr - r->offset;
}

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
        const char *func, int line)
{
        DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
                r->dev->dev_id);
        DBG("%s:%d: page_size %u\n", func, line, r->page_size);
        DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
        DBG("%s:%d: len %lxh\n", func, line, r->len);
        DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
        struct ps3_dma_region *region;
        unsigned long lpar_addr;
        unsigned long bus_addr;
        unsigned long len;
        struct list_head link;
        unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
        int line)
{
        DBG("%s:%d: r.dev %llu:%llu\n", func, line,
                c->region->dev->bus_id, c->region->dev->dev_id);
        DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
        DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
        DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
        DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
        DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
        DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
        DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
        unsigned long bus_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (aligned_bus >= c->bus_addr &&
                    aligned_bus + aligned_len <= c->bus_addr + c->len)
                        return c;

                /* below */
                if (aligned_bus + aligned_len <= c->bus_addr)
                        continue;

                /* above */
                if (aligned_bus >= c->bus_addr + c->len)
                        continue;

                /* we don't handle the multi-chunk case for now */
                dma_dump_chunk(c);
                BUG();
        }
        return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
        unsigned long lpar_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (c->lpar_addr <= aligned_lpar &&
                    aligned_lpar < c->lpar_addr + c->len) {
                        if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
                                return c;
                        else {
                                dma_dump_chunk(c);
                                BUG();
                        }
                }
                /* below */
                if (aligned_lpar + aligned_len <= c->lpar_addr) {
                        continue;
                }
                /* above */
                if (c->lpar_addr + c->len <= aligned_lpar) {
                        continue;
                }
        }
        return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
        int result = 0;

        if (c->bus_addr) {
                result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
                        c->region->dev->dev_id, c->bus_addr, c->len);
                BUG_ON(result);
        }

        kfree(c);
        return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
        int result = 0;
        int iopage;
        unsigned long offset;
        struct ps3_dma_region *r = c->region;

        DBG("%s:start\n", __func__);
        for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
                offset = (1 << r->page_size) * iopage;
                /* put INVALID entry */
                result = lv1_put_iopte(0,
                                       c->bus_addr + offset,
                                       c->lpar_addr + offset,
                                       r->ioid,
                                       0);
                DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
                    c->bus_addr + offset,
                    c->lpar_addr + offset,
                    r->ioid);

                if (result) {
                        DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
                            __LINE__, ps3_result(result));
                }
        }
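
        /*
         * Every IOPTE covered by this chunk now points at the invalid
         * entry, so the io controller can no longer reach these pages;
         * release the bookkeeping structure itself.
         */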
        kfree(c);
        DBG("%s:end\n", __func__);
        return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
        int result;
        struct dma_chunk *c;

        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
        c->len = len;

        BUG_ON(iopte_flag != 0xf800000000000000UL);
        result = lv1_map_device_dma_region(c->region->dev->bus_id,
                                           c->region->dev->dev_id, c->lpar_addr,
                                           c->bus_addr, c->len, iopte_flag);
        if (result) {
                DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
                    __func__, __LINE__, ps3_result(result));
                goto fail_map;
        }

        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        return 0;

fail_map:
        kfree(c);
fail_alloc:
        *c_out = NULL;
        DBG(" <- %s:%d\n", __func__, __LINE__);
        return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
                              unsigned long len, struct dma_chunk **c_out,
                              u64 iopte_flag)
{
        int result;
        struct dma_chunk *c, *last;
        int iopage, pages;
        unsigned long offset;

        DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
            phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->len = len;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        /* allocate IO address */
        if (list_empty(&r->chunk_list.head)) {
                /* first one */
                c->bus_addr = r->bus_addr;
        } else {
                /* derive from last bus addr*/
                last = list_entry(r->chunk_list.head.next,
                                  struct dma_chunk, link);
                c->bus_addr = last->bus_addr + last->len;
                DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
                    last->bus_addr, last->len);
        }

        /* FIXME: check whether length exceeds region size */

        /* build ioptes for the area */
        pages = len >> r->page_size;
        DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
            r->page_size, r->len, pages, iopte_flag);
        for (iopage = 0; iopage < pages; iopage++) {
                offset = (1 << r->page_size) * iopage;
                result = lv1_put_iopte(0,
                                       c->bus_addr + offset,
                                       c->lpar_addr + offset,
                                       r->ioid,
                                       iopte_flag);
                if (result) {
                        pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
                                __func__, __LINE__, ps3_result(result));
                        goto fail_map;
                }
                DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
                    iopage, c->bus_addr + offset, c->lpar_addr + offset,
                    r->ioid);
        }

        /* be sure that last allocated one is inserted at head */
        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        DBG("%s: end\n", __func__);
        return 0;

fail_map:
        for (iopage--; 0 <= iopage; iopage--) {
                lv1_put_iopte(0,
                              c->bus_addr + offset,
                              c->lpar_addr + offset,
                              r->ioid,
                              0);
        }
        kfree(c);
fail_alloc:
        *c_out = NULL;
        return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        DBG(" -> %s:%d:\n", __func__, __LINE__);

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
            __LINE__, r->len, r->page_size, r->offset);

        BUG_ON(!r->len);
        BUG_ON(!r->page_size);
        BUG_ON(!r->region_ops);

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                roundup_pow_of_two(r->len), r->page_size, r->region_type,
                &bus_addr);
        r->bus_addr = bus_addr;

        if (result) {
                DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }

        return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_io_segment(0,
                                         r->len,
                                         r->page_size,
                                         &bus_addr);
        r->bus_addr = bus_addr;
        if (result) {
                DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }
        DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
            r->len, r->page_size, r->bus_addr);
        return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c;
        struct dma_chunk *tmp;

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                                            r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
                    __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;

        return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c, *n;

        DBG("%s: start\n", __func__);
        list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        result = lv1_release_io_segment(0, r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_release_io_segment failed: %s\n",
                    __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;
        DBG("%s: end\n", __func__);

        return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                1 << r->page_size);
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

        if (!USE_DYNAMIC_DMA) {
                unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
                DBG(" -> %s:%d\n", __func__, __LINE__);
                DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
                    virt_addr);
                DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
                    phys_addr);
                DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
                    lpar_addr);
                DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
                DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
                    *bus_addr, len);
        }

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, *bus_addr, len);

        if (c) {
                DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
                dma_dump_chunk(c);
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
                    __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }

        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
                             unsigned long len, dma_addr_t *bus_addr,
                             u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ?
                __pa(virt_addr) : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                1 << r->page_size);

        DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
            virt_addr, len);
        DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
            phys_addr, aligned_phys, aligned_len);

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

        if (c) {
                /* FIXME */
                BUG();
                *bus_addr = c->bus_addr + phys_addr - aligned_phys;
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
                                    iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
                    __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }
        *bus_addr = c->bus_addr + phys_addr - aligned_phys;
        DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
            virt_addr, phys_addr, aligned_phys, *bus_addr);
        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus, 1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                    __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                    __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                    __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                    __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus,
                        1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                    __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                    __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                    __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                    __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

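        /* The chunk list is consistent again; drop the region lock. */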
        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        DBG("%s: end\n", __func__);
        return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
        int result;
        unsigned long virt_addr, len;
        dma_addr_t tmp;

        if (r->len > 16*1024*1024) {    /* FIXME: need proper fix */
                /* force 16M dma pages for linear mapping */
                if (r->page_size != PS3_DMA_16M) {
                        pr_info("%s:%d: forcing 16M pages for linear map\n",
                                __func__, __LINE__);
                        r->page_size = PS3_DMA_16M;
                        r->len = ALIGN(r->len, 1 << r->page_size);
                }
        }

        result = dma_sb_region_create(r);
        BUG_ON(result);

        if (r->offset < map.rm.size) {
                /* Map (part of) 1st RAM chunk */
                virt_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Map (part of) 2nd RAM chunk */
                virt_addr = map.rm.size;
                len = r->len;
                if (r->offset >= map.rm.size)
                        virt_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
        int result;
        dma_addr_t bus_addr;
        unsigned long len, lpar_addr;

        if (r->offset < map.rm.size) {
                /* Unmap (part of) 1st RAM chunk */
                lpar_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Unmap (part of) 2nd RAM chunk */
                lpar_addr = map.r1.base;
                len = r->len;
                if (r->offset >= map.rm.size)
                        lpar_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        result = dma_sb_region_free(r);
        BUG_ON(result);

        return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address. Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
        unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
        return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
        .create = dma_sb_region_create,
        .free = dma_sb_region_free,
        .map = dma_sb_map_area,
        .unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
        .create = dma_sb_region_create_linear,
        .free = dma_sb_region_free_linear,
        .map = dma_sb_map_area_linear,
        .unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
        .create = dma_ioc0_region_create,
        .free = dma_ioc0_region_free,
        .map = dma_ioc0_map_area,
        .unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
        struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
        enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
        unsigned long lpar_addr;

        lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

        r->dev = dev;
        r->page_size = page_size;
        r->region_type = region_type;
        r->offset = lpar_addr;
        if (r->offset >= map.rm.size)
                r->offset -= map.r1.offset;
        r->len = len ? len : ALIGN(map.total, 1 << r->page_size);

        switch (dev->dev_type) {
        case PS3_DEVICE_TYPE_SB:
                r->region_ops = (USE_DYNAMIC_DMA)
                        ? &ps3_dma_sb_region_ops
                        : &ps3_dma_sb_region_linear_ops;
                break;
        case PS3_DEVICE_TYPE_IOC0:
                r->region_ops = &ps3_dma_ioc0_region_ops;
                break;
        default:
                BUG();
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->create);
        return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->free);
        return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        return r->region_ops->unmap(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
        int result;

        DBG(" -> %s:%d\n", __func__, __LINE__);

        result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
                &map.total);

        if (result)
                panic("ps3_repository_read_mm_info() failed");

        map.rm.offset = map.rm.base;
        map.vas_id = map.htab_size = 0;

        /* this implementation assumes map.rm.base is zero */

        BUG_ON(map.rm.base);
        BUG_ON(!map.rm.size);

        /* Check if we got the highmem region from an earlier boot step */

        if (ps3_mm_get_repository_highmem(&map.r1)) {
                result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);

                if (!result)
                        ps3_mm_set_repository_highmem(&map.r1);
        }

        /* correct map.total for the real total amount of memory we use */
        map.total = map.rm.size + map.r1.size;

        if (!map.r1.size) {
                DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
        } else {
                DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
                    __func__, __LINE__, map.rm.size,
                    map.total - map.rm.size);
                memblock_add(map.rm.size, map.total - map.rm.size);
        }

        DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
        ps3_mm_region_destroy(&map.r1);
}