/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * buses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      struct dma_attrs *attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    struct dma_attrs *attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}

/* Create iommu mappings (TCEs) for each entry of a scatter/gather list. */
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				device_to_mask(dev), direction, attrs);
}

/* Tear down the mappings created by dma_iommu_map_sg(). */
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
			   direction, attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			 ", table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
			 mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	}

	return 1;
}

/* Derive the required DMA mask from the span of the device's iommu table. */
static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;

	if (!tbl)
		return 0;

	/* Build an all-ones mask with the same bit width as the top table
	 * entry (it_offset + it_size).
	 */
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
EXPORT_SYMBOL(dma_iommu_ops);
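/*
 * Usage sketch (illustrative, not part of the original file): a platform's
 * bus setup code points each device at its iommu_table and then installs
 * the ops above. The function name below and its caller are hypothetical;
 * set_iommu_table_base() and set_dma_ops() are the existing hooks this
 * file is built around, though the sketch may additionally need
 * <asm/dma-mapping.h> to compile.
 */
static void __maybe_unused example_attach_iommu_ops(struct device *dev,
						    struct iommu_table *tbl)
{
	/* Record which iommu table translates this device's DMA ... */
	set_iommu_table_base(dev, tbl);
	/* ... then route the device's DMA API calls through the ops above. */
	set_dma_ops(dev, &dma_iommu_ops);
}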