/*
 *  linux/arch/arm/mm/dma-mapping-nommu.c
 *
 *  Based on linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 *  dma_noop_ops is used if
 *   - MMU/MPU is off
 *   - cpu is v7m w/o cache support
 *   - device is coherent
 *  otherwise arm_nommu_dma_ops is used.
 *
 *  arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 *  [1] on how to declare such memory).
 *
 *  [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
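
/*
 * For illustration, a minimal sketch of how such a consistent region
 * could be declared from a device tree following the reserved-memory
 * binding referenced above. The node names, address and size here are
 * hypothetical placeholders, not values required by this file:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 *	some-device@40000000 {
 *		memory-region = <&dma_pool>;
 *	};
 */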

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	/*
	 * We are here because:
	 * - no consistent DMA region has been defined, so we can't
	 *   continue, or
	 * - there is no space left in the consistent DMA region, so we
	 *   can only fall back to the generic allocator if the caller
	 *   has advertised that consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return ops->alloc(dev, size, dma_handle, gfp, attrs);

	WARN_ON_ONCE(1);
	return NULL;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		ops->free(dev, size, cpu_addr, dma_addr, attrs);
	else
		WARN_ON_ONCE(1);
}
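
/*
 * For illustration only: a driver that can cope with non-consistent
 * memory reaches the fallback path above through the generic DMA API,
 * roughly as sketched here (dev, handle and the size are placeholder
 * names, not requirements of this file):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_attrs(dev, SZ_4K, &handle, GFP_KERNEL,
 *				    DMA_ATTR_NON_CONSISTENT);
 *	if (buf) {
 *		... use buf, with explicit dma_sync_*() calls ...
 *		dma_free_attrs(dev, SZ_4K, buf, handle,
 *			       DMA_ATTR_NON_CONSISTENT);
 *	}
 */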

/*
 * Make a CPU-owned region visible to the device: dmac_map_area()
 * performs the inner cache maintenance (clean for DMA_TO_DEVICE,
 * invalidate for DMA_FROM_DEVICE), and the outer cache gets the
 * matching treatment for the physical range.
 */
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

/*
 * Hand a region back to the CPU after the device is done with it: for
 * any direction in which the device may have written (dir !=
 * DMA_TO_DEVICE), invalidate the outer cache first and then the inner
 * cache so the CPU does not read stale lines.
 */
static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}
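
/*
 * For illustration only: the map/unmap hooks above and the sg/sync
 * hooks below back the usual streaming DMA API. A driver-side sketch,
 * with placeholder names:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer, wait for completion, then ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */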
static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
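
/*
 * For illustration only: a driver that keeps a buffer mapped across
 * several transfers brackets its CPU accesses with the sync hooks
 * above, e.g. (placeholder names):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU reads the data the device just wrote ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */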

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the CPU can be
		 * treated as coherent if no cache has been detected.
		 * Note that it is not enough to check whether an MPU is
		 * in use, since in the absence of an MPU the system
		 * memory map is used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);