// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA coherent memory allocation.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

/*
 * Apply a cache maintenance function to a physical address range,
 * mapping highmem pages one at a time since they may lack a
 * permanent kernel mapping.
 */
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			/* The first chunk may start mid-page; later
			 * chunks always start on a page boundary. */
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

/*
 * Make a buffer visible to the CPU after a device has written to it:
 * invalidate the dcache so the CPU cannot read stale lines.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

/*
 * Make a buffer visible to a device before it reads from it: write
 * back dirty dcache lines, which is only needed when the dcache
 * operates in writeback mode.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

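/*
 * Illustrative sketch, not part of the original file: how the two
 * sync hooks above are reached through the generic dma-direct code.
 * On a non-coherent configuration, dma_map_single() with
 * DMA_TO_DEVICE ends up in arch_sync_dma_for_device(), while
 * unmapping a DMA_FROM_DEVICE buffer ends up in
 * arch_sync_dma_for_cpu(). The device pointer, buffer, and length
 * below are hypothetical placeholders.
 */
#if 0	/* example only, kept out of the build */
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* The writeback dcache is flushed via arch_sync_dma_for_device()
	 * before ownership of the buffer passes to the device. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device and wait for the transfer ... */

	/* For DMA_TO_DEVICE the unmap side needs no invalidate. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif
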
/*
 * Prepare freshly allocated pages for use as coherent DMA memory:
 * evict any cache lines covering them before they are accessed
 * through an uncached alias.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__invalidate_dcache_range((unsigned long)page_address(page), size);
}

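/*
 * Illustrative sketch, not part of the original file: allocating a
 * coherent buffer from a driver. dma_alloc_coherent() lands in the
 * generic dma-direct allocator, which calls arch_dma_prep_coherent()
 * above to evict stale cache lines and then arch_dma_set_uncached()
 * below to return an uncached alias of the buffer. The function and
 * parameter names are hypothetical placeholders.
 */
#if 0	/* example only, kept out of the build */
static void *example_alloc_coherent(struct device *dev, size_t len,
				    dma_addr_t *handle)
{
	/* The returned pointer aliases the buffer uncached, so no
	 * explicit cache maintenance is needed afterwards. */
	return dma_alloc_coherent(dev, len, handle, GFP_KERNEL);
}
#endif
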
/*
 * Memory caching is platform-dependent in noMMU xtensa
 * configurations. This function should be implemented in platform
 * code in order to enable coherent DMA memory operations when
 * CONFIG_MMU is not enabled.
 */
#ifdef CONFIG_MMU
/*
 * With an MMU, every cached KSEG address has a fixed-offset uncached
 * alias in the KSEG bypass region, so no new mapping is needed.
 */
void *arch_dma_set_uncached(void *p, size_t size)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
#endif /* CONFIG_MMU */
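
/*
 * Illustrative sketch, not part of the original file: on a
 * hypothetical noMMU platform whose memory map provides a fixed
 * uncached alias of RAM, platform code could supply something like
 * the following. PLATFORM_UNCACHED_OFFSET is made up; a real platform
 * must derive the alias from its own memory map.
 */
#if 0	/* example only, kept out of the build */
#define PLATFORM_UNCACHED_OFFSET	0x20000000	/* hypothetical */

void *arch_dma_set_uncached(void *p, size_t size)
{
	return p + PLATFORM_UNCACHED_OFFSET;
}
#endif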