/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag | __GFP_ZERO, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

	/*
	 * Give back the pages beyond the requested size, then remap the
	 * remainder into vmalloc space with caching disabled so the CPU's
	 * view of the buffer stays coherent with DMA.
	 */
	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}

void arch_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, unsigned long attrs)
{
	pr_debug("dma_free_coherent: %p, %pad\n", addr, &handle);
	vfree(addr);
}

#else

#include <asm/cacheflush.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* Allocate from the DMA zone if the device cannot address all of RAM */
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		/* Write back dirty cache lines so the device sees them */
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate so the CPU rereads what the device wrote */
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}

void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	/* Default platform devices without a mask to full 32-bit DMA */
	if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
	    pdev->dev.dma_mask == NULL) {
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	}
}
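/*
 * Example: how the hooks above are reached in practice. This is a minimal,
 * illustrative sketch kept in a comment, not part of this file; the driver
 * name "foo" and the probe function are hypothetical. On this platform,
 * dma_alloc_coherent() ends up in arch_dma_alloc() above, and
 * dma_sync_single_for_device() in arch_sync_dma_for_device().
 *
 *	#include <linux/dma-mapping.h>
 *	#include <linux/platform_device.h>
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *			return -ENODEV;
 *
 *		// Coherent buffer: calls arch_dma_alloc() on m68k
 *		buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		// ... hand "dma" to the device, use "buf" from the CPU ...
 *
 *		// Streaming buffers would instead use dma_map_single() and
 *		// dma_sync_single_for_device(), which performs the
 *		// cache_push()/cache_clear() maintenance seen above.
 *
 *		dma_free_coherent(dev, PAGE_SIZE, buf, dma);
 *		return 0;
 *	}
 */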