xref: /openbmc/linux/arch/arc/mm/dma.c (revision 7e5b06b8)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21162b070SVineet Gupta /*
31162b070SVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
41162b070SVineet Gupta  */
51162b070SVineet Gupta 
60a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
7f2b0b25aSAlexey Brodkin #include <asm/cache.h>
81162b070SVineet Gupta #include <asm/cacheflush.h>
91162b070SVineet Gupta 
/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */
17f73c9045SChristoph Hellwig 
/*
 * Prepare a freshly allocated page for DMA-coherent use.
 *
 * Evict any stale L1/L2 lines referencing the backing page from its
 * earlier life as a normal "cached" page (this bit us - STAR 9000898266).
 *
 * Although the core does call flush_cache_vmap(), it only gets a kvaddr
 * and hence can't efficiently flush L1/L2, which want the paddr.
 * Currently flush_cache_vmap nukes the L1 cache completely, which
 * will be optimized as a separate commit.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	phys_addr_t paddr = page_to_phys(page);

	dma_cache_wback_inv(paddr, size);
}
32a79a8121SAlexey Brodkin 
/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */
504c612addSEugeniy Paltsev 
/*
 * Cache maintenance before handing a buffer to the device ("map"):
 * TO_DEVICE writes back dirty lines, FROM_DEVICE invalidates so the
 * CPU won't later read stale data, BIDIRECTIONAL does both.
 * Direction validity is checked by the dma-mapping core, not here.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		dma_cache_wback(paddr, size);
	else if (dir == DMA_FROM_DEVICE)
		dma_cache_inv(paddr, size);
	else if (dir == DMA_BIDIRECTIONAL)
		dma_cache_wback_inv(paddr, size);
}
71713a7462SChristoph Hellwig 
/*
 * Cache maintenance before the CPU reclaims a buffer ("unmap"):
 * TO_DEVICE needs nothing; FROM_DEVICE/BIDIRECTIONAL invalidate to
 * discard any lines the CPU speculatively prefetched while the
 * device owned the buffer.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		dma_cache_inv(paddr, size);
}
892820a708SEugeniy Paltsev 
/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic, keeping the caches
	 * consistent with memory - eliding need for any explicit cache
	 * maintenance of DMA buffers.
	 */
	bool hw_coherent = is_isa_arcv2() && ioc_enable && coherent;

	if (hw_coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
107