--- mapping.c (62fcee9a3bd73e279d3052245a652a918d0c51da)
+++ mapping.c (249baa54790171438524ba97e8e0485dd6aa2762)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * arch-independent dma-mapping routines
  *
  * Copyright (c) 2006 SUSE Linux Products GmbH
  * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
  */
 #include <linux/memblock.h> /* for max_pfn */

--- 257 unchanged lines hidden ---

                 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
                                 attrs);
         if (!ops->mmap)
                 return -ENXIO;
         return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 EXPORT_SYMBOL(dma_mmap_attrs);

-static u64 dma_default_get_required_mask(struct device *dev)
-{
-        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
-        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
-        u64 mask;
-
-        if (!high_totalram) {
-                /* convert to mask just covering totalram */
-                low_totalram = (1 << (fls(low_totalram) - 1));
-                low_totalram += low_totalram - 1;
-                mask = low_totalram;
-        } else {
-                high_totalram = (1 << (fls(high_totalram) - 1));
-                high_totalram += high_totalram - 1;
-                mask = (((u64)high_totalram) << 32) + 0xffffffff;
-        }
-        return mask;
-}
-
 u64 dma_get_required_mask(struct device *dev)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);

         if (dma_is_direct(ops))
                 return dma_direct_get_required_mask(dev);
         if (ops->get_required_mask)
                 return ops->get_required_mask(dev);
-        return dma_default_get_required_mask(dev);
+
+        /*
+         * We require every DMA ops implementation to at least support a 32-bit
+         * DMA mask (and use bounce buffering if that isn't supported in
+         * hardware).  As the direct mapping code has its own routine to
+         * actually report an optimal mask we default to 32-bit here as that
+         * is the right thing for most IOMMUs, and at least not actively
+         * harmful in general.
+         */
+        return DMA_BIT_MASK(32);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);

 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t flag, unsigned long attrs)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
         void *cpu_addr;

--- 147 unchanged lines hidden ---
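
For reference, the arithmetic the removed dma_default_get_required_mask() helper performed can be reproduced in a standalone userspace sketch. This is not kernel code: fls_stub stands in for the kernel's fls(), and the 4 KiB PAGE_SHIFT and 8 GiB max_pfn are illustrative assumptions, not values taken from the commit.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * most significant set bit; returns 0 for 0. */
static int fls_stub(uint32_t x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Mirrors the removed dma_default_get_required_mask(): derive a mask
 * that just covers the highest populated page frame (max_pfn). */
static uint64_t default_required_mask(uint64_t max_pfn)
{
        uint32_t low_totalram = (max_pfn - 1) << PAGE_SHIFT;
        uint32_t high_totalram = (max_pfn - 1) >> (32 - PAGE_SHIFT);
        uint64_t mask;

        if (!high_totalram) {
                /* All RAM below 4 GiB: round the low word down to a
                 * power of two, then set every bit below it. */
                low_totalram = 1u << (fls_stub(low_totalram) - 1);
                low_totalram += low_totalram - 1;
                mask = low_totalram;
        } else {
                /* RAM above 4 GiB: same rounding on the high word;
                 * the low 32 bits are all ones. */
                high_totalram = 1u << (fls_stub(high_totalram) - 1);
                high_totalram += high_totalram - 1;
                mask = ((uint64_t)high_totalram << 32) + 0xffffffff;
        }
        return mask;
}

int main(void)
{
        /* 8 GiB of RAM -> max_pfn == 0x200000 with 4 KiB pages;
         * prints 0x1ffffffff, i.e. a 33-bit required mask. */
        printf("0x%" PRIx64 "\n", default_required_mask(0x200000));
        return 0;
}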
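
As a usage note, drivers consume dma_get_required_mask() roughly as in the sketch below. foo_setup_dma is a hypothetical probe-time helper, while dma_get_required_mask(), dma_set_mask_and_coherent(), and DMA_BIT_MASK() are the real kernel API.

#include <linux/dma-mapping.h>

/* Hypothetical helper: a common driver pattern is to consult the
 * required mask and only enable 64-bit descriptors when memory above
 * 4 GiB actually needs to be addressed. */
static int foo_setup_dma(struct device *dev)
{
        if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
            !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;       /* using 64-bit addressing */

        /* Fall back to 32 bits; after this commit the core reports at
         * least a 32-bit mask for any dma_map_ops implementation,
         * bouncing in software when the hardware cannot reach that far. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}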