1 /* 2 * PS3 address space management. 3 * 4 * Copyright (C) 2006 Sony Computer Entertainment Inc. 5 * Copyright 2006 Sony Corp. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; version 2 of the License. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 */ 20 21 #include <linux/kernel.h> 22 #include <linux/module.h> 23 #include <linux/memory_hotplug.h> 24 25 #include <asm/firmware.h> 26 #include <asm/lmb.h> 27 #include <asm/udbg.h> 28 #include <asm/lv1call.h> 29 30 #include "platform.h" 31 32 #if defined(DEBUG) 33 #define DBG(fmt...) udbg_printf(fmt) 34 #else 35 #define DBG(fmt...) do{if(0)printk(fmt);}while(0) 36 #endif 37 38 enum { 39 #if defined(CONFIG_PS3_USE_LPAR_ADDR) 40 USE_LPAR_ADDR = 1, 41 #else 42 USE_LPAR_ADDR = 0, 43 #endif 44 #if defined(CONFIG_PS3_DYNAMIC_DMA) 45 USE_DYNAMIC_DMA = 1, 46 #else 47 USE_DYNAMIC_DMA = 0, 48 #endif 49 }; 50 51 enum { 52 PAGE_SHIFT_4K = 12U, 53 PAGE_SHIFT_64K = 16U, 54 PAGE_SHIFT_16M = 24U, 55 }; 56 57 static unsigned long make_page_sizes(unsigned long a, unsigned long b) 58 { 59 return (a << 56) | (b << 48); 60 } 61 62 enum { 63 ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04, 64 ALLOCATE_MEMORY_ADDR_ZERO = 0X08, 65 }; 66 67 /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */ 68 69 enum { 70 HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */ 71 HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */ 72 }; 73 74 /*============================================================================*/ 75 /* virtual address space routines */ 76 /*============================================================================*/ 77 78 /** 79 * struct mem_region - memory region structure 80 * @base: base address 81 * @size: size in bytes 82 * @offset: difference between base and rm.size 83 */ 84 85 struct mem_region { 86 unsigned long base; 87 unsigned long size; 88 unsigned long offset; 89 }; 90 91 /** 92 * struct map - address space state variables holder 93 * @total: total memory available as reported by HV 94 * @vas_id - HV virtual address space id 95 * @htab_size: htab size in bytes 96 * 97 * The HV virtual address space (vas) allows for hotplug memory regions. 98 * Memory regions can be created and destroyed in the vas at runtime. 99 * @rm: real mode (bootmem) region 100 * @r1: hotplug memory region(s) 101 * 102 * ps3 addresses 103 * virt_addr: a cpu 'translated' effective address 104 * phys_addr: an address in what Linux thinks is the physical address space 105 * lpar_addr: an address in the HV virtual address space 106 * bus_addr: an io controller 'translated' address on a device bus 107 */ 108 109 struct map { 110 unsigned long total; 111 unsigned long vas_id; 112 unsigned long htab_size; 113 struct mem_region rm; 114 struct mem_region r1; 115 }; 116 117 #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__) 118 static void _debug_dump_map(const struct map* m, const char* func, int line) 119 { 120 DBG("%s:%d: map.total = %lxh\n", func, line, m->total); 121 DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size); 122 DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id); 123 DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size); 124 DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base); 125 DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset); 126 DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size); 127 } 128 129 static struct map map; 130 131 /** 132 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address 133 * @phys_addr: linux physical address 134 */ 135 136 unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr) 137 { 138 BUG_ON(is_kernel_addr(phys_addr)); 139 if (USE_LPAR_ADDR) 140 return phys_addr; 141 else 142 return (phys_addr < map.rm.size || phys_addr >= map.total) 143 ? phys_addr : phys_addr + map.r1.offset; 144 } 145 146 EXPORT_SYMBOL(ps3_mm_phys_to_lpar); 147 148 /** 149 * ps3_mm_vas_create - create the virtual address space 150 */ 151 152 void __init ps3_mm_vas_create(unsigned long* htab_size) 153 { 154 int result; 155 unsigned long start_address; 156 unsigned long size; 157 unsigned long access_right; 158 unsigned long max_page_size; 159 unsigned long flags; 160 161 result = lv1_query_logical_partition_address_region_info(0, 162 &start_address, &size, &access_right, &max_page_size, 163 &flags); 164 165 if (result) { 166 DBG("%s:%d: lv1_query_logical_partition_address_region_info " 167 "failed: %s\n", __func__, __LINE__, 168 ps3_result(result)); 169 goto fail; 170 } 171 172 if (max_page_size < PAGE_SHIFT_16M) { 173 DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__, 174 max_page_size); 175 goto fail; 176 } 177 178 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX); 179 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN); 180 181 result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE, 182 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K), 183 &map.vas_id, &map.htab_size); 184 185 if (result) { 186 DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n", 187 __func__, __LINE__, ps3_result(result)); 188 goto fail; 189 } 190 191 result = lv1_select_virtual_address_space(map.vas_id); 192 193 if (result) { 194 DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n", 195 __func__, __LINE__, ps3_result(result)); 196 goto fail; 197 } 198 199 *htab_size = map.htab_size; 200 201 debug_dump_map(&map); 202 203 return; 204 205 fail: 206 panic("ps3_mm_vas_create failed"); 207 } 208 209 /** 210 * ps3_mm_vas_destroy - 211 */ 212 213 void ps3_mm_vas_destroy(void) 214 { 215 if (map.vas_id) { 216 lv1_select_virtual_address_space(0); 217 lv1_destruct_virtual_address_space(map.vas_id); 218 map.vas_id = 0; 219 } 220 } 221 222 /*============================================================================*/ 223 /* memory hotplug routines */ 224 /*============================================================================*/ 225 226 /** 227 * ps3_mm_region_create - create a memory region in the vas 228 * @r: pointer to a struct mem_region to accept initialized values 229 * @size: requested region size 230 * 231 * This implementation creates the region with the vas large page size. 232 * @size is rounded down to a multiple of the vas large page size. 233 */ 234 235 int ps3_mm_region_create(struct mem_region *r, unsigned long size) 236 { 237 int result; 238 unsigned long muid; 239 240 r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); 241 242 DBG("%s:%d requested %lxh\n", __func__, __LINE__, size); 243 DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size); 244 DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__, 245 (unsigned long)(size - r->size), 246 (size - r->size) / 1024 / 1024); 247 248 if (r->size == 0) { 249 DBG("%s:%d: size == 0\n", __func__, __LINE__); 250 result = -1; 251 goto zero_region; 252 } 253 254 result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0, 255 ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid); 256 257 if (result || r->base < map.rm.size) { 258 DBG("%s:%d: lv1_allocate_memory failed: %s\n", 259 __func__, __LINE__, ps3_result(result)); 260 goto zero_region; 261 } 262 263 r->offset = r->base - map.rm.size; 264 return result; 265 266 zero_region: 267 r->size = r->base = r->offset = 0; 268 return result; 269 } 270 271 /** 272 * ps3_mm_region_destroy - destroy a memory region 273 * @r: pointer to struct mem_region 274 */ 275 276 void ps3_mm_region_destroy(struct mem_region *r) 277 { 278 if (r->base) { 279 lv1_release_memory(r->base); 280 r->size = r->base = r->offset = 0; 281 map.total = map.rm.size; 282 } 283 } 284 285 /** 286 * ps3_mm_add_memory - hot add memory 287 */ 288 289 static int __init ps3_mm_add_memory(void) 290 { 291 int result; 292 unsigned long start_addr; 293 unsigned long start_pfn; 294 unsigned long nr_pages; 295 296 if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) 297 return -ENODEV; 298 299 BUG_ON(!mem_init_done); 300 301 start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size; 302 start_pfn = start_addr >> PAGE_SHIFT; 303 nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT; 304 305 DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n", 306 __func__, __LINE__, start_addr, start_pfn, nr_pages); 307 308 result = add_memory(0, start_addr, map.r1.size); 309 310 if (result) { 311 DBG("%s:%d: add_memory failed: (%d)\n", 312 __func__, __LINE__, result); 313 return result; 314 } 315 316 result = online_pages(start_pfn, nr_pages); 317 318 if (result) 319 DBG("%s:%d: online_pages failed: (%d)\n", 320 __func__, __LINE__, result); 321 322 return result; 323 } 324 325 core_initcall(ps3_mm_add_memory); 326 327 /*============================================================================*/ 328 /* dma routines */ 329 /*============================================================================*/ 330 331 /** 332 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address. 333 * @r: pointer to dma region structure 334 * @lpar_addr: HV lpar address 335 */ 336 337 static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r, 338 unsigned long lpar_addr) 339 { 340 BUG_ON(lpar_addr >= map.r1.base + map.r1.size); 341 return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr 342 : lpar_addr - map.r1.offset); 343 } 344 345 #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__) 346 static void _dma_dump_region(const struct ps3_dma_region *r, const char* func, 347 int line) 348 { 349 DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id, 350 r->did.dev_id); 351 DBG("%s:%d: page_size %u\n", func, line, r->page_size); 352 DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); 353 DBG("%s:%d: len %lxh\n", func, line, r->len); 354 } 355 356 /** 357 * dma_chunk - A chunk of dma pages mapped by the io controller. 358 * @region - The dma region that owns this chunk. 359 * @lpar_addr: Starting lpar address of the area to map. 360 * @bus_addr: Starting ioc bus address of the area to map. 361 * @len: Length in bytes of the area to map. 362 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the 363 * list of all chuncks owned by the region. 364 * 365 * This implementation uses a very simple dma page manager 366 * based on the dma_chunk structure. This scheme assumes 367 * that all drivers use very well behaved dma ops. 368 */ 369 370 struct dma_chunk { 371 struct ps3_dma_region *region; 372 unsigned long lpar_addr; 373 unsigned long bus_addr; 374 unsigned long len; 375 struct list_head link; 376 unsigned int usage_count; 377 }; 378 379 #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__) 380 static void _dma_dump_chunk (const struct dma_chunk* c, const char* func, 381 int line) 382 { 383 DBG("%s:%d: r.dev %u:%u\n", func, line, 384 c->region->did.bus_id, c->region->did.dev_id); 385 DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); 386 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); 387 DBG("%s:%d: r.len %lxh\n", func, line, c->region->len); 388 DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); 389 DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); 390 DBG("%s:%d: c.len %lxh\n", func, line, c->len); 391 } 392 393 static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, 394 unsigned long bus_addr, unsigned long len) 395 { 396 struct dma_chunk *c; 397 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); 398 unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size); 399 400 list_for_each_entry(c, &r->chunk_list.head, link) { 401 /* intersection */ 402 if (aligned_bus >= c->bus_addr 403 && aligned_bus < c->bus_addr + c->len 404 && aligned_bus + aligned_len <= c->bus_addr + c->len) { 405 return c; 406 } 407 /* below */ 408 if (aligned_bus + aligned_len <= c->bus_addr) { 409 continue; 410 } 411 /* above */ 412 if (aligned_bus >= c->bus_addr + c->len) { 413 continue; 414 } 415 416 /* we don't handle the multi-chunk case for now */ 417 418 dma_dump_chunk(c); 419 BUG(); 420 } 421 return NULL; 422 } 423 424 static int dma_free_chunk(struct dma_chunk *c) 425 { 426 int result = 0; 427 428 if (c->bus_addr) { 429 result = lv1_unmap_device_dma_region(c->region->did.bus_id, 430 c->region->did.dev_id, c->bus_addr, c->len); 431 BUG_ON(result); 432 } 433 434 kfree(c); 435 return result; 436 } 437 438 /** 439 * dma_map_pages - Maps dma pages into the io controller bus address space. 440 * @r: Pointer to a struct ps3_dma_region. 441 * @phys_addr: Starting physical address of the area to map. 442 * @len: Length in bytes of the area to map. 443 * c_out: A pointer to receive an allocated struct dma_chunk for this area. 444 * 445 * This is the lowest level dma mapping routine, and is the one that will 446 * make the HV call to add the pages into the io controller address space. 447 */ 448 449 static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, 450 unsigned long len, struct dma_chunk **c_out) 451 { 452 int result; 453 struct dma_chunk *c; 454 455 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); 456 457 if (!c) { 458 result = -ENOMEM; 459 goto fail_alloc; 460 } 461 462 c->region = r; 463 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); 464 c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr); 465 c->len = len; 466 467 result = lv1_map_device_dma_region(c->region->did.bus_id, 468 c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len, 469 0xf800000000000000UL); 470 471 if (result) { 472 DBG("%s:%d: lv1_map_device_dma_region failed: %s\n", 473 __func__, __LINE__, ps3_result(result)); 474 goto fail_map; 475 } 476 477 list_add(&c->link, &r->chunk_list.head); 478 479 *c_out = c; 480 return 0; 481 482 fail_map: 483 kfree(c); 484 fail_alloc: 485 *c_out = NULL; 486 DBG(" <- %s:%d\n", __func__, __LINE__); 487 return result; 488 } 489 490 /** 491 * dma_region_create - Create a device dma region. 492 * @r: Pointer to a struct ps3_dma_region. 493 * 494 * This is the lowest level dma region create routine, and is the one that 495 * will make the HV call to create the region. 496 */ 497 498 static int dma_region_create(struct ps3_dma_region* r) 499 { 500 int result; 501 502 r->len = _ALIGN_UP(map.total, 1 << r->page_size); 503 INIT_LIST_HEAD(&r->chunk_list.head); 504 spin_lock_init(&r->chunk_list.lock); 505 506 result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id, 507 r->len, r->page_size, r->region_type, &r->bus_addr); 508 509 dma_dump_region(r); 510 511 if (result) { 512 DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n", 513 __func__, __LINE__, ps3_result(result)); 514 r->len = r->bus_addr = 0; 515 } 516 517 return result; 518 } 519 520 /** 521 * dma_region_free - Free a device dma region. 522 * @r: Pointer to a struct ps3_dma_region. 523 * 524 * This is the lowest level dma region free routine, and is the one that 525 * will make the HV call to free the region. 526 */ 527 528 static int dma_region_free(struct ps3_dma_region* r) 529 { 530 int result; 531 struct dma_chunk *c; 532 struct dma_chunk *tmp; 533 534 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { 535 list_del(&c->link); 536 dma_free_chunk(c); 537 } 538 539 result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id, 540 r->bus_addr); 541 542 if (result) 543 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", 544 __func__, __LINE__, ps3_result(result)); 545 546 r->len = r->bus_addr = 0; 547 548 return result; 549 } 550 551 /** 552 * dma_map_area - Map an area of memory into a device dma region. 553 * @r: Pointer to a struct ps3_dma_region. 554 * @virt_addr: Starting virtual address of the area to map. 555 * @len: Length in bytes of the area to map. 556 * @bus_addr: A pointer to return the starting ioc bus address of the area to 557 * map. 558 * 559 * This is the common dma mapping routine. 560 */ 561 562 static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr, 563 unsigned long len, unsigned long *bus_addr) 564 { 565 int result; 566 unsigned long flags; 567 struct dma_chunk *c; 568 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) 569 : virt_addr; 570 571 *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); 572 573 if (!USE_DYNAMIC_DMA) { 574 unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr); 575 DBG(" -> %s:%d\n", __func__, __LINE__); 576 DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__, 577 virt_addr); 578 DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__, 579 phys_addr); 580 DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__, 581 lpar_addr); 582 DBG("%s:%d len %lxh\n", __func__, __LINE__, len); 583 DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__, 584 *bus_addr, len); 585 } 586 587 spin_lock_irqsave(&r->chunk_list.lock, flags); 588 c = dma_find_chunk(r, *bus_addr, len); 589 590 if (c) { 591 c->usage_count++; 592 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 593 return 0; 594 } 595 596 result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size), 597 _ALIGN_UP(len, 1 << r->page_size), &c); 598 599 if (result) { 600 *bus_addr = 0; 601 DBG("%s:%d: dma_map_pages failed (%d)\n", 602 __func__, __LINE__, result); 603 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 604 return result; 605 } 606 607 c->usage_count = 1; 608 609 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 610 return result; 611 } 612 613 /** 614 * dma_unmap_area - Unmap an area of memory from a device dma region. 615 * @r: Pointer to a struct ps3_dma_region. 616 * @bus_addr: The starting ioc bus address of the area to unmap. 617 * @len: Length in bytes of the area to unmap. 618 * 619 * This is the common dma unmap routine. 620 */ 621 622 int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr, 623 unsigned long len) 624 { 625 unsigned long flags; 626 struct dma_chunk *c; 627 628 spin_lock_irqsave(&r->chunk_list.lock, flags); 629 c = dma_find_chunk(r, bus_addr, len); 630 631 if (!c) { 632 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 633 1 << r->page_size); 634 unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size); 635 DBG("%s:%d: not found: bus_addr %lxh\n", 636 __func__, __LINE__, bus_addr); 637 DBG("%s:%d: not found: len %lxh\n", 638 __func__, __LINE__, len); 639 DBG("%s:%d: not found: aligned_bus %lxh\n", 640 __func__, __LINE__, aligned_bus); 641 DBG("%s:%d: not found: aligned_len %lxh\n", 642 __func__, __LINE__, aligned_len); 643 BUG(); 644 } 645 646 c->usage_count--; 647 648 if (!c->usage_count) { 649 list_del(&c->link); 650 dma_free_chunk(c); 651 } 652 653 spin_unlock_irqrestore(&r->chunk_list.lock, flags); 654 return 0; 655 } 656 657 /** 658 * dma_region_create_linear - Setup a linear dma maping for a device. 659 * @r: Pointer to a struct ps3_dma_region. 660 * 661 * This routine creates an HV dma region for the device and maps all available 662 * ram into the io controller bus address space. 663 */ 664 665 static int dma_region_create_linear(struct ps3_dma_region *r) 666 { 667 int result; 668 unsigned long tmp; 669 670 /* force 16M dma pages for linear mapping */ 671 672 if (r->page_size != PS3_DMA_16M) { 673 pr_info("%s:%d: forcing 16M pages for linear map\n", 674 __func__, __LINE__); 675 r->page_size = PS3_DMA_16M; 676 } 677 678 result = dma_region_create(r); 679 BUG_ON(result); 680 681 result = dma_map_area(r, map.rm.base, map.rm.size, &tmp); 682 BUG_ON(result); 683 684 if (USE_LPAR_ADDR) 685 result = dma_map_area(r, map.r1.base, map.r1.size, 686 &tmp); 687 else 688 result = dma_map_area(r, map.rm.size, map.r1.size, 689 &tmp); 690 691 BUG_ON(result); 692 693 return result; 694 } 695 696 /** 697 * dma_region_free_linear - Free a linear dma mapping for a device. 698 * @r: Pointer to a struct ps3_dma_region. 699 * 700 * This routine will unmap all mapped areas and free the HV dma region. 701 */ 702 703 static int dma_region_free_linear(struct ps3_dma_region *r) 704 { 705 int result; 706 707 result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size); 708 BUG_ON(result); 709 710 result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base), 711 map.r1.size); 712 BUG_ON(result); 713 714 result = dma_region_free(r); 715 BUG_ON(result); 716 717 return result; 718 } 719 720 /** 721 * dma_map_area_linear - Map an area of memory into a device dma region. 722 * @r: Pointer to a struct ps3_dma_region. 723 * @virt_addr: Starting virtual address of the area to map. 724 * @len: Length in bytes of the area to map. 725 * @bus_addr: A pointer to return the starting ioc bus address of the area to 726 * map. 727 * 728 * This routine just returns the coresponding bus address. Actual mapping 729 * occurs in dma_region_create_linear(). 730 */ 731 732 static int dma_map_area_linear(struct ps3_dma_region *r, 733 unsigned long virt_addr, unsigned long len, unsigned long *bus_addr) 734 { 735 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) 736 : virt_addr; 737 *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); 738 return 0; 739 } 740 741 /** 742 * dma_unmap_area_linear - Unmap an area of memory from a device dma region. 743 * @r: Pointer to a struct ps3_dma_region. 744 * @bus_addr: The starting ioc bus address of the area to unmap. 745 * @len: Length in bytes of the area to unmap. 746 * 747 * This routine does nothing. Unmapping occurs in dma_region_free_linear(). 748 */ 749 750 static int dma_unmap_area_linear(struct ps3_dma_region *r, 751 unsigned long bus_addr, unsigned long len) 752 { 753 return 0; 754 } 755 756 int ps3_dma_region_create(struct ps3_dma_region *r) 757 { 758 return (USE_DYNAMIC_DMA) 759 ? dma_region_create(r) 760 : dma_region_create_linear(r); 761 } 762 763 int ps3_dma_region_free(struct ps3_dma_region *r) 764 { 765 return (USE_DYNAMIC_DMA) 766 ? dma_region_free(r) 767 : dma_region_free_linear(r); 768 } 769 770 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr, 771 unsigned long len, unsigned long *bus_addr) 772 { 773 return (USE_DYNAMIC_DMA) 774 ? dma_map_area(r, virt_addr, len, bus_addr) 775 : dma_map_area_linear(r, virt_addr, len, bus_addr); 776 } 777 778 int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr, 779 unsigned long len) 780 { 781 return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len) 782 : dma_unmap_area_linear(r, bus_addr, len); 783 } 784 785 /*============================================================================*/ 786 /* system startup routines */ 787 /*============================================================================*/ 788 789 /** 790 * ps3_mm_init - initialize the address space state variables 791 */ 792 793 void __init ps3_mm_init(void) 794 { 795 int result; 796 797 DBG(" -> %s:%d\n", __func__, __LINE__); 798 799 result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size, 800 &map.total); 801 802 if (result) 803 panic("ps3_repository_read_mm_info() failed"); 804 805 map.rm.offset = map.rm.base; 806 map.vas_id = map.htab_size = 0; 807 808 /* this implementation assumes map.rm.base is zero */ 809 810 BUG_ON(map.rm.base); 811 BUG_ON(!map.rm.size); 812 813 lmb_add(map.rm.base, map.rm.size); 814 lmb_analyze(); 815 816 /* arrange to do this in ps3_mm_add_memory */ 817 ps3_mm_region_create(&map.r1, map.total - map.rm.size); 818 819 DBG(" <- %s:%d\n", __func__, __LINE__); 820 } 821 822 /** 823 * ps3_mm_shutdown - final cleanup of address space 824 */ 825 826 void ps3_mm_shutdown(void) 827 { 828 ps3_mm_region_destroy(&map.r1); 829 } 830