Lines Matching +full:io +full:- +full:hv

1 // SPDX-License-Identifier: GPL-2.0-only
9 #include <linux/dma-mapping.h>
15 #include <asm/cell-regs.h>
56 HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
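For scale, the limit above works out as follows (arithmetic only, not code from this file):

	unsigned long htab_bytes = 1UL << HTAB_SIZE_MAX; /* 1 << 20 = 0x100000 = 1 MiB */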
65 * struct mem_region - memory region structure
80 * struct map - address space state variables holder
81 * @total: total memory available as reported by HV
82 * @vas_id: HV virtual address space id
85 * The HV virtual address space (vas) allows for hotplug memory regions.
93 * lpar_addr: an address in the HV virtual address space
94 * bus_addr: an io controller 'translated' address on a device bus
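Putting the three address spaces together, the linux-to-lpar step implied by these offsets can be sketched as follows (illustrative only; it assumes r1 is the only hotplugged highmem region, as the code below does):

	static unsigned long phys_to_lpar_sketch(unsigned long phys_addr)
	{
		/* real mode region addresses translate 1:1 to lpar addresses */
		if (phys_addr < map.rm.size)
			return phys_addr;
		/* highmem addresses are shifted up by the region offset,
		 * since r1.offset = r1.base - map.rm.size (see below) */
		return phys_addr + map.r1.offset;
	}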
109 DBG("%s:%d: map.total = %llxh\n", func, line, m->total); in _debug_dump_map()
110 DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size); in _debug_dump_map()
111 DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id); in _debug_dump_map()
112 DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size); in _debug_dump_map()
113 DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base); in _debug_dump_map()
114 DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset); in _debug_dump_map()
115 DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size); in _debug_dump_map()
121 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
135 * ps3_mm_vas_create - create the virtual address space
196 * ps3_mm_vas_destroy - destroy the virtual address space
223 result = ps3_repository_read_highmem_info(0, &r->base, &r->size); in ps3_mm_get_repository_highmem()
228 if (!r->base || !r->size) { in ps3_mm_get_repository_highmem()
229 result = -1; in ps3_mm_get_repository_highmem()
233 r->offset = r->base - map.rm.size; in ps3_mm_get_repository_highmem()
236 __func__, __LINE__, r->base, r->size); in ps3_mm_get_repository_highmem()
243 r->size = r->base = r->offset = 0; in ps3_mm_get_repository_highmem()
251 return r ? ps3_repository_write_highmem_info(0, r->base, r->size) : in ps3_mm_set_repository_highmem()
256 * ps3_mm_region_create - create a memory region in the vas
269 r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); in ps3_mm_region_create()
272 DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size); in ps3_mm_region_create()
274 size - r->size, (size - r->size) / 1024 / 1024); in ps3_mm_region_create()
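The ALIGN_DOWN above rounds the requested size down to whole 16 MiB HV pages; a worked example (values are illustrative):

	/* size = 0x1d00000 (29 MiB), 1 << PAGE_SHIFT_16M = 0x1000000 (16 MiB):
	 *   r->size = ALIGN_DOWN(0x1d00000, 0x1000000) = 0x1000000
	 *   wasted  = size - r->size = 0xd00000 (13 MiB)
	 */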
276 if (r->size == 0) { in ps3_mm_region_create()
278 result = -1; in ps3_mm_region_create()
282 result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0, in ps3_mm_region_create()
283 ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid); in ps3_mm_region_create()
285 if (result || r->base < map.rm.size) { in ps3_mm_region_create()
291 r->destroy = 1; in ps3_mm_region_create()
292 r->offset = r->base - map.rm.size; in ps3_mm_region_create()
296 r->size = r->base = r->offset = 0; in ps3_mm_region_create()
301 * ps3_mm_region_destroy - destroy a memory region
309 if (!r->destroy) { in ps3_mm_region_destroy()
313 if (r->base) { in ps3_mm_region_destroy()
314 result = lv1_release_memory(r->base); in ps3_mm_region_destroy()
320 r->size = r->base = r->offset = 0; in ps3_mm_region_destroy()
332 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
334 * @lpar_addr: HV lpar address
341 lpar_addr -= map.r1.offset; in dma_sb_lpar_to_bus()
342 BUG_ON(lpar_addr < r->offset); in dma_sb_lpar_to_bus()
343 BUG_ON(lpar_addr >= r->offset + r->len); in dma_sb_lpar_to_bus()
344 return r->bus_addr + lpar_addr - r->offset; in dma_sb_lpar_to_bus()
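Together with ps3_mm_phys_to_lpar() above, the full linux-physical-to-bus translation for a southbridge device is two hops (a usage sketch; the variables are illustrative):

	unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	unsigned long bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);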
351 DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id, in _dma_dump_region()
352 r->dev->dev_id); in _dma_dump_region()
353 DBG("%s:%d: page_size %u\n", func, line, r->page_size); in _dma_dump_region()
354 DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); in _dma_dump_region()
355 DBG("%s:%d: len %lxh\n", func, line, r->len); in _dma_dump_region()
356 DBG("%s:%d: offset %lxh\n", func, line, r->offset); in _dma_dump_region()
360 * dma_chunk - A chunk of dma pages mapped by the io controller.
361 * @region: The dma region that owns this chunk.
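The chunk structure itself is not among the matching lines, but its fields can be read off the accesses below; a reconstruction sketch:

	struct dma_chunk {
		struct ps3_dma_region *region;	/* owning region (@region above) */
		unsigned long lpar_addr;	/* lpar address of the mapped pages */
		unsigned long bus_addr;		/* io controller bus address */
		unsigned long len;		/* length in bytes */
		struct list_head link;		/* entry in region->chunk_list.head */
		unsigned int usage_count;	/* map/unmap reference count */
	};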
387 c->region->dev->bus_id, c->region->dev->dev_id); in _dma_dump_chunk()
388 DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); in _dma_dump_chunk()
389 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); in _dma_dump_chunk()
390 DBG("%s:%d: r.len %lxh\n", func, line, c->region->len); in _dma_dump_chunk()
391 DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset); in _dma_dump_chunk()
392 DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); in _dma_dump_chunk()
393 DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); in _dma_dump_chunk()
394 DBG("%s:%d: c.len %lxh\n", func, line, c->len); in _dma_dump_chunk()
401 unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size); in dma_find_chunk()
402 unsigned long aligned_len = ALIGN(len + bus_addr - aligned_bus, in dma_find_chunk()
403 1 << r->page_size); in dma_find_chunk()
405 list_for_each_entry(c, &r->chunk_list.head, link) { in dma_find_chunk()
407 if (aligned_bus >= c->bus_addr && in dma_find_chunk()
408 aligned_bus + aligned_len <= c->bus_addr + c->len) in dma_find_chunk()
412 if (aligned_bus + aligned_len <= c->bus_addr) in dma_find_chunk()
416 if (aligned_bus >= c->bus_addr + c->len) in dma_find_chunk()
419 /* we don't handle the multi-chunk case for now */ in dma_find_chunk()
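The alignment above widens the lookup to whole io pages; for example, with 4 KiB io pages (page_size = 12):

	/* bus_addr = 0x101234, len = 0x100:
	 *   aligned_bus = ALIGN_DOWN(0x101234, 0x1000) = 0x101000
	 *   aligned_len = ALIGN(0x100 + 0x101234 - 0x101000, 0x1000) = 0x1000
	 * so a chunk matches only if it covers [0x101000, 0x102000) entirely.
	 */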
430 unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size); in dma_find_chunk_lpar()
431 unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar, in dma_find_chunk_lpar()
432 1 << r->page_size); in dma_find_chunk_lpar()
434 list_for_each_entry(c, &r->chunk_list.head, link) { in dma_find_chunk_lpar()
436 if (c->lpar_addr <= aligned_lpar && in dma_find_chunk_lpar()
437 aligned_lpar < c->lpar_addr + c->len) { in dma_find_chunk_lpar()
438 if (aligned_lpar + aligned_len <= c->lpar_addr + c->len) in dma_find_chunk_lpar()
446 if (aligned_lpar + aligned_len <= c->lpar_addr) { in dma_find_chunk_lpar()
450 if (c->lpar_addr + c->len <= aligned_lpar) { in dma_find_chunk_lpar()
461 if (c->bus_addr) { in dma_sb_free_chunk()
462 result = lv1_unmap_device_dma_region(c->region->dev->bus_id, in dma_sb_free_chunk()
463 c->region->dev->dev_id, c->bus_addr, c->len); in dma_sb_free_chunk()
476 struct ps3_dma_region *r = c->region; in dma_ioc0_free_chunk()
479 for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { in dma_ioc0_free_chunk()
480 offset = (1 << r->page_size) * iopage; in dma_ioc0_free_chunk()
483 c->bus_addr + offset, in dma_ioc0_free_chunk()
484 c->lpar_addr + offset, in dma_ioc0_free_chunk()
485 r->ioid, in dma_ioc0_free_chunk()
488 c->bus_addr + offset, in dma_ioc0_free_chunk()
489 c->lpar_addr + offset, in dma_ioc0_free_chunk()
490 r->ioid); in dma_ioc0_free_chunk()
503 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
510 * make the HV call to add the pages into the io controller address space.
521 result = -ENOMEM; in dma_sb_map_pages()
525 c->region = r; in dma_sb_map_pages()
526 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); in dma_sb_map_pages()
527 c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr); in dma_sb_map_pages()
528 c->len = len; in dma_sb_map_pages()
531 result = lv1_map_device_dma_region(c->region->dev->bus_id, in dma_sb_map_pages()
532 c->region->dev->dev_id, c->lpar_addr, in dma_sb_map_pages()
533 c->bus_addr, c->len, iopte_flag); in dma_sb_map_pages()
540 list_add(&c->link, &r->chunk_list.head); in dma_sb_map_pages()
549 DBG(" <- %s:%d\n", __func__, __LINE__); in dma_sb_map_pages()
566 result = -ENOMEM; in dma_ioc0_map_pages()
570 c->region = r; in dma_ioc0_map_pages()
571 c->len = len; in dma_ioc0_map_pages()
572 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); in dma_ioc0_map_pages()
573 /* allocate IO address */ in dma_ioc0_map_pages()
574 if (list_empty(&r->chunk_list.head)) { in dma_ioc0_map_pages()
576 c->bus_addr = r->bus_addr; in dma_ioc0_map_pages()
579 last = list_entry(r->chunk_list.head.next, in dma_ioc0_map_pages()
581 c->bus_addr = last->bus_addr + last->len; in dma_ioc0_map_pages()
583 last->bus_addr, last->len); in dma_ioc0_map_pages()
589 pages = len >> r->page_size; in dma_ioc0_map_pages()
591 r->page_size, r->len, pages, iopte_flag); in dma_ioc0_map_pages()
593 offset = (1 << r->page_size) * iopage; in dma_ioc0_map_pages()
595 c->bus_addr + offset, in dma_ioc0_map_pages()
596 c->lpar_addr + offset, in dma_ioc0_map_pages()
597 r->ioid, in dma_ioc0_map_pages()
605 iopage, c->bus_addr + offset, c->lpar_addr + offset, in dma_ioc0_map_pages()
606 r->ioid); in dma_ioc0_map_pages()
610 list_add(&c->link, &r->chunk_list.head); in dma_ioc0_map_pages()
617 for (iopage--; 0 <= iopage; iopage--) { in dma_ioc0_map_pages()
619 c->bus_addr + offset, in dma_ioc0_map_pages()
620 c->lpar_addr + offset, in dma_ioc0_map_pages()
621 r->ioid, in dma_ioc0_map_pages()
631 * dma_sb_region_create - Create a device dma region.
635 * will make the HV call to create the region.
643 DBG(" -> %s:%d:\n", __func__, __LINE__); in dma_sb_region_create()
647 if (!r->dev->bus_id) { in dma_sb_region_create()
649 r->dev->bus_id, r->dev->dev_id); in dma_sb_region_create()
654 __LINE__, r->len, r->page_size, r->offset); in dma_sb_region_create()
656 BUG_ON(!r->len); in dma_sb_region_create()
657 BUG_ON(!r->page_size); in dma_sb_region_create()
658 BUG_ON(!r->region_ops); in dma_sb_region_create()
660 INIT_LIST_HEAD(&r->chunk_list.head); in dma_sb_region_create()
661 spin_lock_init(&r->chunk_list.lock); in dma_sb_region_create()
663 result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id, in dma_sb_region_create()
664 roundup_pow_of_two(r->len), r->page_size, r->region_type, in dma_sb_region_create()
666 r->bus_addr = bus_addr; in dma_sb_region_create()
671 r->len = r->bus_addr = 0; in dma_sb_region_create()
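Note the roundup_pow_of_two() above: the HV allocation is sized up to the next power of two while r->len keeps the caller's length; for example (arithmetic only):

	/* r->len = 0x1800000 (24 MiB) allocates an HV region of
	 * roundup_pow_of_two(0x1800000) = 0x2000000 (32 MiB) */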
682 INIT_LIST_HEAD(&r->chunk_list.head); in dma_ioc0_region_create()
683 spin_lock_init(&r->chunk_list.lock); in dma_ioc0_region_create()
686 r->len, in dma_ioc0_region_create()
687 r->page_size, in dma_ioc0_region_create()
689 r->bus_addr = bus_addr; in dma_ioc0_region_create()
693 r->len = r->bus_addr = 0; in dma_ioc0_region_create()
696 r->len, r->page_size, r->bus_addr); in dma_ioc0_region_create()
701 * dma_sb_region_free - Free a device dma region.
705 * will make the HV call to free the region.
716 if (!r->dev->bus_id) { in dma_sb_region_free()
718 r->dev->bus_id, r->dev->dev_id); in dma_sb_region_free()
722 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { in dma_sb_region_free()
723 list_del(&c->link); in dma_sb_region_free()
727 result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id, in dma_sb_region_free()
728 r->bus_addr); in dma_sb_region_free()
734 r->bus_addr = 0; in dma_sb_region_free()
745 list_for_each_entry_safe(c, n, &r->chunk_list.head, link) { in dma_ioc0_region_free()
746 list_del(&c->link); in dma_ioc0_region_free()
750 result = lv1_release_io_segment(0, r->bus_addr); in dma_ioc0_region_free()
756 r->bus_addr = 0; in dma_ioc0_region_free()
763 * dma_sb_map_area - Map an area of memory into a device dma region.
782 unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); in dma_sb_map_area()
783 unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys, in dma_sb_map_area()
784 1 << r->page_size); in dma_sb_map_area()
789 DBG(" -> %s:%d\n", __func__, __LINE__); in dma_sb_map_area()
801 spin_lock_irqsave(&r->chunk_list.lock, flags); in dma_sb_map_area()
807 c->usage_count++; in dma_sb_map_area()
808 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_sb_map_area()
818 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_sb_map_area()
822 c->usage_count = 1; in dma_sb_map_area()
824 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_sb_map_area()
837 unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size); in dma_ioc0_map_area()
838 unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys, in dma_ioc0_map_area()
839 1 << r->page_size); in dma_ioc0_map_area()
846 spin_lock_irqsave(&r->chunk_list.lock, flags); in dma_ioc0_map_area()
852 *bus_addr = c->bus_addr + phys_addr - aligned_phys; in dma_ioc0_map_area()
853 c->usage_count++; in dma_ioc0_map_area()
854 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_ioc0_map_area()
865 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_ioc0_map_area()
868 *bus_addr = c->bus_addr + phys_addr - aligned_phys; in dma_ioc0_map_area()
871 c->usage_count = 1; in dma_ioc0_map_area()
873 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_ioc0_map_area()
878 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
892 spin_lock_irqsave(&r->chunk_list.lock, flags); in dma_sb_unmap_area()
897 1 << r->page_size); in dma_sb_unmap_area()
899 - aligned_bus, 1 << r->page_size); in dma_sb_unmap_area()
911 c->usage_count--; in dma_sb_unmap_area()
913 if (!c->usage_count) { in dma_sb_unmap_area()
914 list_del(&c->link); in dma_sb_unmap_area()
918 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_sb_unmap_area()
929 spin_lock_irqsave(&r->chunk_list.lock, flags); in dma_ioc0_unmap_area()
934 1 << r->page_size); in dma_ioc0_unmap_area()
936 - aligned_bus, in dma_ioc0_unmap_area()
937 1 << r->page_size); in dma_ioc0_unmap_area()
949 c->usage_count--; in dma_ioc0_unmap_area()
951 if (!c->usage_count) { in dma_ioc0_unmap_area()
952 list_del(&c->link); in dma_ioc0_unmap_area()
956 spin_unlock_irqrestore(&r->chunk_list.lock, flags); in dma_ioc0_unmap_area()
962 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
965 * This routine creates an HV dma region for the device and maps all available
966 * ram into the io controller bus address space.
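The implementation below maps in at most two pieces, split at the boot-memory boundary; a worked example (sizes are illustrative):

	/* map.rm.size = 0x8000000 (128 MiB), r->offset = 0, r->len = 0xc000000:
	 *   piece 1: virt_addr = map.rm.base, len = 0x8000000 (boot memory)
	 *   piece 2: virt_addr = map.rm.size, len = 0x4000000 (highmem region)
	 */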
975 if (r->len > 16*1024*1024) { /* FIXME: need proper fix */ in dma_sb_region_create_linear()
977 if (r->page_size != PS3_DMA_16M) { in dma_sb_region_create_linear()
980 r->page_size = PS3_DMA_16M; in dma_sb_region_create_linear()
981 r->len = ALIGN(r->len, 1 << r->page_size); in dma_sb_region_create_linear()
988 if (r->offset < map.rm.size) { in dma_sb_region_create_linear()
990 virt_addr = map.rm.base + r->offset; in dma_sb_region_create_linear()
991 len = map.rm.size - r->offset; in dma_sb_region_create_linear()
992 if (len > r->len) in dma_sb_region_create_linear()
993 len = r->len; in dma_sb_region_create_linear()
1000 if (r->offset + r->len > map.rm.size) { in dma_sb_region_create_linear()
1003 len = r->len; in dma_sb_region_create_linear()
1004 if (r->offset >= map.rm.size) in dma_sb_region_create_linear()
1005 virt_addr += r->offset - map.rm.size; in dma_sb_region_create_linear()
1007 len -= map.rm.size - r->offset; in dma_sb_region_create_linear()
1018 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
1021 * This routine will unmap all mapped areas and free the HV dma region.
1030 if (r->offset < map.rm.size) { in dma_sb_region_free_linear()
1032 lpar_addr = map.rm.base + r->offset; in dma_sb_region_free_linear()
1033 len = map.rm.size - r->offset; in dma_sb_region_free_linear()
1034 if (len > r->len) in dma_sb_region_free_linear()
1035 len = r->len; in dma_sb_region_free_linear()
1041 if (r->offset + r->len > map.rm.size) { in dma_sb_region_free_linear()
1044 len = r->len; in dma_sb_region_free_linear()
1045 if (r->offset >= map.rm.size) in dma_sb_region_free_linear()
1046 lpar_addr += r->offset - map.rm.size; in dma_sb_region_free_linear()
1048 len -= map.rm.size - r->offset; in dma_sb_region_free_linear()
1061 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
1083 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
1127 r->dev = dev; in ps3_dma_region_init()
1128 r->page_size = page_size; in ps3_dma_region_init()
1129 r->region_type = region_type; in ps3_dma_region_init()
1130 r->offset = lpar_addr; in ps3_dma_region_init()
1131 if (r->offset >= map.rm.size) in ps3_dma_region_init()
1132 r->offset -= map.r1.offset; in ps3_dma_region_init()
1133 r->len = len ? len : ALIGN(map.total, 1 << r->page_size); in ps3_dma_region_init()
1135 dev->core.dma_mask = &r->dma_mask; in ps3_dma_region_init()
1137 result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32)); in ps3_dma_region_init()
1140 dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n", in ps3_dma_region_init()
1145 switch (dev->dev_type) { in ps3_dma_region_init()
1147 r->region_ops = (USE_DYNAMIC_DMA) in ps3_dma_region_init()
1152 r->region_ops = &ps3_dma_ioc0_region_ops; in ps3_dma_region_init()
1156 return -EINVAL; in ps3_dma_region_init()
1165 BUG_ON(!r->region_ops); in ps3_dma_region_create()
1166 BUG_ON(!r->region_ops->create); in ps3_dma_region_create()
1167 return r->region_ops->create(r); in ps3_dma_region_create()
1174 BUG_ON(!r->region_ops); in ps3_dma_region_free()
1175 BUG_ON(!r->region_ops->free); in ps3_dma_region_free()
1176 return r->region_ops->free(r); in ps3_dma_region_free()
1184 return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag); in ps3_dma_map()
1190 return r->region_ops->unmap(r, bus_addr, len); in ps3_dma_unmap()
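A driver-side sketch of the wrapper API above (hypothetical region pointer, buffer, and iopte flags; error handling elided):

	int result;
	dma_addr_t bus_addr;

	result = ps3_dma_region_create(r);	/* HV region + empty chunk list */
	result = ps3_dma_map(r, virt_addr, len, &bus_addr,
			     CBE_IOPTE_PP_W | CBE_IOPTE_PP_R); /* flags are an assumption */
	/* ... perform dma using bus_addr ... */
	result = ps3_dma_unmap(r, bus_addr, len);
	result = ps3_dma_region_free(r);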
1198 * ps3_mm_init - initialize the address space state variables
1205 DBG(" -> %s:%d\n", __func__, __LINE__); in ps3_mm_init()
1224 result = ps3_mm_region_create(&map.r1, map.total - map.rm.size); in ps3_mm_init()
1238 map.total - map.rm.size); in ps3_mm_init()
1239 memblock_add(map.rm.size, map.total - map.rm.size); in ps3_mm_init()
1242 DBG(" <- %s:%d\n", __func__, __LINE__); in ps3_mm_init()
1246 * ps3_mm_shutdown - final cleanup of address space