mapping.c: 05887cb610a54bf568de7f0bc07c4a64e45ac6f9 -> 7249c1a52df9967cd23550f3dc24fb6ca43cdc6a
// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */

--- 209 unchanged lines hidden ---

                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}
EXPORT_SYMBOL(dma_common_get_sgtable);

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                        attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

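/*
 * Illustrative sketch, not part of mapping.c: how a driver that exports a
 * coherent buffer might call the dma_get_sgtable() wrapper (the attrs == 0
 * form of dma_get_sgtable_attrs() above) to describe that buffer with a
 * scatterlist. The my_export_buffer() helper is hypothetical.
 */
static int my_export_buffer(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr, size_t size, struct sg_table *sgt)
{
        int ret;

        /* Build a single-entry sg_table covering the coherent allocation. */
        ret = dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
        if (ret)
                return ret;

        /* ... hand sgt to the importer; call sg_free_table(sgt) when done ... */
        return 0;
}
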
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP

--- 20 unchanged lines hidden ---

        }

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
EXPORT_SYMBOL(dma_common_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

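/*
 * Illustrative sketch, not part of mapping.c: a character-device driver could
 * service its mmap file operation with dma_mmap_coherent(), the attrs == 0
 * wrapper around dma_mmap_attrs() above. struct my_dev and my_dev_mmap() are
 * hypothetical.
 */
struct my_dev {                         /* hypothetical per-device state */
        struct device *dev;
        void *cpu_addr;                 /* from dma_alloc_coherent() */
        dma_addr_t dma_addr;
        size_t size;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *md = file->private_data;

        /* Map the whole coherent buffer allocated at probe time. */
        return dma_mmap_coherent(md->dev, vma, md->cpu_addr, md->dma_addr,
                        md->size);
}
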
#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
        u64 mask;

        if (!high_totalram) {

--- 15 unchanged lines hidden ---

        if (ops->get_required_mask)
                return ops->get_required_mask(dev);
        return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif

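/*
 * Illustrative sketch, not part of mapping.c: dma_get_required_mask() tells a
 * driver how wide a mask is needed to address all of installed memory, which
 * it can use to choose between 32-bit and 64-bit descriptor formats.
 * my_choose_addressing() is a hypothetical helper.
 */
static int my_choose_addressing(struct device *dev)
{
        u64 required = dma_get_required_mask(dev);

        /* Only pay for 64-bit descriptors when memory actually needs them. */
        if (required > DMA_BIT_MASK(32))
                return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
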
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)       (true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);
        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (!arch_dma_alloc_attrs(&dev))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

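/*
 * Illustrative sketch, not part of mapping.c: typical driver usage of the
 * coherent API through the dma_alloc_coherent()/dma_free_coherent() wrappers
 * (attrs == 0) around dma_alloc_attrs()/dma_free_attrs() above. The ring
 * buffer names and size are hypothetical.
 */
#define MY_RING_SIZE    (64 * 1024)     /* hypothetical descriptor ring size */
static void *my_ring;
static dma_addr_t my_ring_dma;

static int my_alloc_ring(struct device *dev)
{
        /* GFP_KERNEL: may sleep, so only call from process context. */
        my_ring = dma_alloc_coherent(dev, MY_RING_SIZE, &my_ring_dma,
                        GFP_KERNEL);
        return my_ring ? 0 : -ENOMEM;
}

static void my_free_ring(struct device *dev)
{
        /* Must not be called with IRQs disabled, see dma_free_attrs(). */
        dma_free_coherent(dev, MY_RING_SIZE, my_ring, my_ring_dma);
}
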
static inline void dma_check_mask(struct device *dev, u64 mask)
{
        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
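
/*
 * Illustrative sketch, not part of mapping.c: probe-time mask negotiation.
 * dma_set_mask_and_coherent() combines dma_set_mask() and
 * dma_set_coherent_mask() above; falling back from 64 to 32 bits is the
 * usual pattern. my_set_dma_masks() is a hypothetical helper.
 */
static int my_set_dma_masks(struct device *dev)
{
        /* Prefer full 64-bit addressing when the platform supports it. */
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;

        /* Otherwise fall back to 32-bit DMA addressing. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}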