xref: /openbmc/linux/arch/csky/mm/dma-mapping.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <asm/cache.h>

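/*
 * Apply the cache maintenance callback @fn to the kernel mapping of the
 * physical range [paddr, paddr + size), one page at a time.  Highmem
 * pages are mapped temporarily with kmap_atomic() before @fn is called.
 */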
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page    = phys_to_page(paddr);
	void *start          = __va(page_to_phys(page));
	unsigned long offset = offset_in_page(paddr);
	size_t left          = size;

	do {
		size_t len = left;

		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			start = kmap_atomic(page);

			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);

			kunmap_atomic(start);
		} else {
			fn((unsigned long)start + offset,
					(unsigned long)start + offset + len);
		}
		offset = 0;

		page++;
		start += PAGE_SIZE;
		left -= len;
	} while (left);
}

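/* Zero the range, then write back and invalidate the covering cache lines. */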
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
	memset((void *)start, 0, end - start);
	dma_wbinv_range(start, end);
}

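/*
 * Prepare a page for use as a non-cacheable coherent DMA buffer: clear it
 * and write back + invalidate the covering cache lines so no dirty data
 * can later be evicted on top of the buffer.
 */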
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}

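/*
 * CPU -> device handoff: for DMA_TO_DEVICE a write back is sufficient; for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL the lines are written back and
 * invalidated so no stale data is evicted over the incoming buffer.
 */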
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}

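/*
 * Device -> CPU handoff: nothing to do for DMA_TO_DEVICE; for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL invalidate the lines so the CPU
 * observes the data the device has just written.
 */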
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_inv_range);
		break;
	default:
		BUG();
	}
}