/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

/* Per-device coherent pool, as set up by dma_declare_coherent_memory() */
struct dma_coherent_mem {
	void		*virt_base;	/* uncached mapping of the pool */
	u32		device_base;	/* device-side base address of the pool */
	int		size;		/* pool size in pages */
	int		flags;
	unsigned long	*bitmap;	/* one bit per page, set when allocated */
};

/*
 * Satisfy the request from the device's declared pool if one exists;
 * otherwise allocate ordinary pages, flush them out of the cache and
 * return an uncached mapping of them to the caller.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
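
/*
 * Return memory obtained from dma_alloc_coherent(): either release the
 * pages back to the device's declared pool, or free the underlying pages
 * and tear down the uncached mapping that was handed to the caller.
 */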
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Declare a region of memory (typically a chunk of on-chip or board RAM)
 * as the coherent pool for this device. The region is mapped uncached and
 * later dma_alloc_coherent() calls for the device are carved out of it.
 */
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap_nocache(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pages = (size + (device_addr & ~PAGE_MASK) +
		     PAGE_SIZE - 1) >> PAGE_SHIFT;
	int pos, err;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	/*
	 * Flush through the cached P1 alias of the buffer; SH-5 has no
	 * P1 segment, so the virtual address is used directly there.
	 */
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
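
/*
 * Back the last (still empty) resource slot of a platform device with a
 * zeroed DMA-coherent buffer of 'memsize' bytes, publishing its DMA handle
 * as an IORESOURCE_MEM region for the driver to pick up later.
 */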
int platform_resource_setup_memory(struct platform_device *pdev,
				   char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
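
/*
 * Illustrative usage sketch, not taken from this file: board setup code
 * that has left the final resource entry of a platform device zeroed can
 * reserve a 1 MiB buffer for it with
 *
 *	platform_resource_setup_memory(&camera_device, "camera", 1 << 20);
 *
 * "camera_device" and the size are hypothetical placeholders; on success
 * the reserved region appears as an IORESOURCE_MEM resource whose start
 * address is the DMA handle returned by dma_alloc_coherent() above.
 */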