/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * m68k architecture hooks for the generic DMA mapping code:
 * coherent-allocation preparation, mmap protection bits, the
 * non-MMU/ColdFire coherent alloc/free fallback, streaming cache
 * maintenance, and default DMA masks for platform devices.
 */

#undef DEBUG

#include <linux/dma-noncoherent.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
/*
 * Prepare a page range for use as a coherent DMA buffer.
 * cache_push() is an m68k cache primitive defined elsewhere; presumably it
 * writes back/flushes the CPU cache for the physical range so the device
 * sees current data -- confirm against asm/cacheflush definitions.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}

/*
 * Return the page protection to use when user space mmap()s a DMA
 * buffer: the mapping is made non-cacheable, with the exact bits chosen
 * per CPU generation (040/060 use the '040 bit layout, older parts the
 * '030 no-cache bit).
 */
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (CPU_IS_040_OR_060) {
		/* Clear the '040 cache-mode field, then set global + serialized no-cache. */
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#else

#include <asm/cacheflush.h>

/*
 * Coherent allocation for the non-MMU / ColdFire configuration: plain
 * page allocator memory, zeroed, with the physical address used directly
 * as the DMA handle (virt_to_phys).
 *
 * Note: a device whose DMA mask cannot cover the full 32-bit range (or a
 * NULL device) is served from GFP_DMA.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

/* Free a buffer obtained from arch_dma_alloc(); dma_handle is unused here. */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

/*
 * Streaming-DMA cache maintenance before a device access.
 *
 * TO_DEVICE / BIDIRECTIONAL ranges are pushed (cache_push) so the device
 * reads current data; FROM_DEVICE ranges are cleared (cache_clear) so
 * stale lines are not written back over the device's data -- exact
 * semantics of the two primitives live in the m68k cache code.
 *
 * NOTE(review): the error message names dma_sync_single_for_device, the
 * historical entry point that reaches this hook, not this function.
 */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				dir);
		break;
	}
}

/*
 * Give platform devices that declared no DMA masks a default 32-bit
 * coherent mask, and point dma_mask at it so both masks stay in sync.
 * Devices that set either mask themselves are left untouched.
 */
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
	    pdev->dev.dma_mask == NULL) {
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	}
}