/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/firmware.h>
#include <asm/lmb.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
#endif

enum {
#if defined(CONFIG_PS3_USE_LPAR_ADDR)
	USE_LPAR_ADDR = 1,
#else
	USE_LPAR_ADDR = 0,
#endif
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
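
/*
 * Worked example, following directly from the shifts above:
 * make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K), as used by
 * ps3_mm_vas_create() below, places 24 (0x18) in bits 56-63 and 16 (0x10)
 * in bits 48-55, producing the value 0x1810000000000000.
 */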

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines                                            */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void _debug_dump_map(const struct map* m, const char* func, int line)
{
	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	if (USE_LPAR_ADDR)
		return phys_addr;
	else
		return (phys_addr < map.rm.size || phys_addr >= map.total)
			? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
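
/*
 * Illustrative example only (the region sizes are hypothetical, not values
 * reported by the HV): if map.rm.size = 0x8000000 (128M of boot memory) and
 * the hotplug region was allocated at map.r1.base = 0x700000000000, then
 * map.r1.offset = 0x700000000000 - 0x8000000.  A Linux physical address of
 * 0x8100000 (inside r1's physical window) therefore maps to the lpar address
 * 0x8100000 + map.r1.offset = 0x700000100000, while addresses below
 * map.rm.size pass through unchanged.
 */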

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the current virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	if (map.vas_id) {
		lv1_select_virtual_address_space(0);
		lv1_destruct_virtual_address_space(map.vas_id);
		map.vas_id = 0;
	}
}

/*============================================================================*/
/* memory hotplug routines                                                    */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	unsigned long muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
		(unsigned long)(size - r->size),
		(size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

void ps3_mm_region_destroy(struct mem_region *r)
{
	if (r->base) {
		lv1_release_memory(r->base);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	BUG_ON(!mem_init_done);

	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}

core_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines                                                               */
/*============================================================================*/

/**
 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
		: lpar_addr - map.r1.offset);
}
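
/*
 * Illustrative example only (hypothetical values, continuing the sketch used
 * for ps3_mm_phys_to_lpar() above): with map.rm.size = 0x8000000,
 * map.r1.offset = 0x700000000000 - 0x8000000 and r->bus_addr = 0x80000000,
 * the lpar address 0x700000100000 translates back to the linear offset
 * 0x8100000 and so to the bus address 0x80000000 + 0x8100000 = 0x88100000;
 * lpar addresses at or below map.rm.size are simply offset by r->bus_addr.
 */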

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
	int line)
{
	DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
		r->did.dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk* c, const char* func,
	int line)
{
	DBG("%s:%d: r.dev %u:%u\n", func, line,
		c->region->did.bus_id, c->region->did.dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr
		    && aligned_bus < c->bus_addr + c->len
		    && aligned_bus + aligned_len <= c->bus_addr + c->len) {
			return c;
		}
		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr) {
			continue;
		}
		/* above */
		if (aligned_bus >= c->bus_addr + c->len) {
			continue;
		}

		/* we don't handle the multi-chunk case for now */

		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}
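
/*
 * Illustrative example only (hypothetical numbers): for a region using 64K
 * pages that already holds a chunk with c->bus_addr = 0x80000000 and
 * c->len = 0x30000, a lookup for bus_addr = 0x80010000, len = 0x8000 aligns
 * to [0x80010000, 0x80020000) and falls entirely inside the chunk, so that
 * chunk is returned; a lookup at 0x80040000 lies entirely above it and is
 * skipped; a request that only partially overlaps the chunk falls through
 * to the unhandled multi-chunk case and hits BUG().
 */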

static int dma_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
			c->region->did.dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

/**
 * dma_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	result = lv1_map_device_dma_region(c->region->did.bus_id,
		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
		0xf800000000000000UL);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

/**
 * dma_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_region_create(struct ps3_dma_region* r)
{
	int result;

	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->len, r->page_size, r->region_type, &r->bus_addr);

	dma_dump_region(r);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

/**
 * dma_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_region_free(struct ps3_dma_region* r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->len = r->bus_addr = 0;

	return result;
}

/**
 * dma_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;

	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
		_ALIGN_UP(len, 1 << r->page_size), &c);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}
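
/*
 * Note on chunk lifetime (descriptive only, summarizing the code above and
 * dma_unmap_area() below): an area that falls inside an already mapped,
 * page-aligned chunk does not trigger another HV call; it simply bumps
 * usage_count.  The chunk and its HV mapping are released only when a
 * matching number of dma_unmap_area() calls drops usage_count to zero.
 */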

/**
 * dma_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
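
/*
 * The routines below implement the alternative, non-dynamic strategy
 * selected when USE_DYNAMIC_DMA is 0: instead of mapping and unmapping
 * chunks on demand with HV calls, all of ram is mapped into the io
 * controller bus space once, at region creation time, and the per-area
 * map/unmap entry points reduce to simple address arithmetic.
 */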

/**
 * dma_region_create_linear - Set up a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long tmp;

	/* force 16M dma pages for linear mapping */

	if (r->page_size != PS3_DMA_16M) {
		pr_info("%s:%d: forcing 16M pages for linear map\n",
			__func__, __LINE__);
		r->page_size = PS3_DMA_16M;
	}

	result = dma_region_create(r);
	BUG_ON(result);

	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
	BUG_ON(result);

	if (USE_LPAR_ADDR)
		result = dma_map_area(r, map.r1.base, map.r1.size,
			&tmp);
	else
		result = dma_map_area(r, map.rm.size, map.r1.size,
			&tmp);

	BUG_ON(result);

	return result;
}

/**
 * dma_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_region_free_linear(struct ps3_dma_region *r)
{
	int result;

	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
	BUG_ON(result);

	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
		map.r1.size);
	BUG_ON(result);

	result = dma_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_region_create_linear().
 */

static int dma_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_region_free_linear().
 */

static int dma_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	return (USE_DYNAMIC_DMA)
		? dma_region_create(r)
		: dma_region_create_linear(r);
}

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	return (USE_DYNAMIC_DMA)
		? dma_region_free(r)
		: dma_region_free_linear(r);
}

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	return (USE_DYNAMIC_DMA)
		? dma_map_area(r, virt_addr, len, bus_addr)
		: dma_map_area_linear(r, virt_addr, len, bus_addr);
}

int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
		: dma_unmap_area_linear(r, bus_addr, len);
}
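
/*
 * Hypothetical usage sketch of the entry points above (not taken from an
 * in-tree driver; the exact field setup, in particular did and region_type,
 * depends on the bus code that owns the struct ps3_dma_region):
 *
 *	r->page_size = PS3_DMA_16M;
 *	result = ps3_dma_region_create(r);
 *	...
 *	result = ps3_dma_map(r, (unsigned long)buf, buf_len, &bus_addr);
 *	... program the device with bus_addr ...
 *	result = ps3_dma_unmap(r, bus_addr, buf_len);
 *	...
 *	result = ps3_dma_region_free(r);
 */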

/*============================================================================*/
/* system startup routines                                                    */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	lmb_add(map.rm.base, map.rm.size);
	lmb_analyze();

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
	map.total = map.rm.size;
}