xref: /openbmc/linux/arch/powerpc/kernel/dma-iommu.c (revision d78c317f)
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

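/* Frees a coherent buffer allocated above and tears down its mappings. */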
static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

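/* Tears down the TCE mapping created by dma_iommu_map_page(). */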
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}

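/* Maps a scatter/gather list through the iommu, merging entries where
 * possible.  Returns the number of DMA segments produced, or 0 on failure.
 */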
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

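/* Tears down the mappings created by dma_iommu_map_sg(). */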
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
		dev_info(dev, "Warning: IOMMU window too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
				mask, (tbl->it_offset + tbl->it_size) <<
				IOMMU_PAGE_SHIFT);
		return 0;
	} else
		return 1;
}

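/* Return the smallest all-ones DMA mask that covers the end of the device's
 * iommu table (it_offset + it_size).
 */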
static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;
	if (!tbl)
		return 0;

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc_coherent		= dma_iommu_alloc_coherent,
	.free_coherent		= dma_iommu_free_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
EXPORT_SYMBOL(dma_iommu_ops);
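
/*
 * Usage sketch (illustrative, not a fixed contract): platform code that owns
 * an iommu_table typically points a device at that table and then installs
 * these ops, roughly:
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * The exact call sites, and how the table is looked up, are bus and platform
 * specific.
 */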