/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->ops->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
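
/*
 * Example (illustrative sketch, not part of the original file): the intended
 * call flow for the three functions above.  "my_domain_alloc",
 * "my_alloc_hw_domain" and "my_free_hw_domain" are hypothetical placeholders,
 * error handling is abbreviated, and the 4GB IOVA size is arbitrary.  The
 * IOMMU driver attaches the cookie when it creates a DMA-API domain, then the
 * arch/bus code sizes the IOVA space once the device is attached:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct iommu_domain *domain = my_alloc_hw_domain();
 *
 *		if (domain && type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(domain)) {
 *			my_free_hw_domain(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 *
 *	// ...later, for a device attached to such a domain:
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, 0, 1ULL << 32))
 *		dev_err(dev, "failed to initialise DMA domain\n");
 *
 * iommu_put_dma_cookie() is the matching teardown, normally called from the
 * driver's domain_free callback.
 */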

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
		dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		int j, order = __fls(count);

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to single-page allocations.
		 */
		for (order = min(order, MAX_ORDER); order > 0; order--) {
			page = alloc_pages(gfp | __GFP_NORETRY, order);
			if (!page)
				continue;
			if (PageCompound(page)) {
				if (!split_huge_page(page))
					break;
				__free_pages(page, order);
			} else {
				split_page(page, order);
				break;
			}
		}
		if (!page)
			page = alloc_page(gfp);
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		j = 1 << order;
		count -= j;
		while (j--)
			pages[i++] = page++;
	}
	return pages;
}
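
/*
 * For example (assuming every allocation above succeeds on the first try),
 * a request for count = 70 pages is satisfied as 64 + 4 + 2: __fls(70) = 6,
 * so an order-6 block is allocated and split, then __fls(6) = 2 gives an
 * order-2 block, then __fls(2) = 1 gives an order-1 block, at which point
 * the array holds all 70 page pointers.  Any failure along the way simply
 * drops down to smaller orders, and ultimately to single pages.
 */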

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size,
		gfp_t gfp, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	*handle = DMA_ERROR_CODE;

	pages = __iommu_dma_alloc_pages(count, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
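
/*
 * Example (illustrative sketch, not part of the original file): how a
 * non-coherent arch allocator might drive iommu_dma_alloc()/iommu_dma_free().
 * "arch_flush_page" is a hypothetical stand-in for whatever cache-maintenance
 * primitive the architecture provides; CPU remapping of the page array and
 * attribute handling are omitted:
 *
 *	static void arch_flush_page(struct device *dev, const void *virt,
 *				    phys_addr_t phys)
 *	{
 *		// clean/invalidate one page of CPU caches for "virt"/"phys"
 *	}
 *
 *	struct page **pages;
 *	dma_addr_t iova;
 *	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
 *
 *	pages = iommu_dma_alloc(dev, size, GFP_KERNEL, prot, &iova,
 *				arch_flush_page);
 *	if (!pages)
 *		return NULL;
 *
 *	// hand "iova" to the device, map "pages" for the CPU (e.g. vmap()),
 *	// and eventually tear everything down again with:
 *	iommu_dma_free(dev, pages, size, &iova);
 */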

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
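
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * dma_map_ops .map_page wrapper built on iommu_dma_map_page() and
 * dma_direction_to_prot().  "my_dev_is_coherent" and
 * "arch_sync_page_for_device" are hypothetical placeholders for however the
 * architecture tracks coherency (e.g. arm64's is_device_dma_coherent()) and
 * performs cache maintenance:
 *
 *	static dma_addr_t my_iommu_map_page(struct device *dev, struct page *page,
 *					    unsigned long offset, size_t size,
 *					    enum dma_data_direction dir,
 *					    struct dma_attrs *attrs)
 *	{
 *		bool coherent = my_dev_is_coherent(dev);
 *		int prot = dma_direction_to_prot(dir, coherent);
 *		dma_addr_t dma_addr = iommu_dma_map_page(dev, page, offset,
 *							 size, prot);
 *
 *		if (!coherent && !iommu_dma_mapping_error(dev, dma_addr))
 *			arch_sync_page_for_device(page, offset, size, dir);
 *		return dma_addr;
 *	}
 *
 * The matching .unmap_page can call iommu_dma_unmap_page() directly, after
 * any cache maintenance needed for DMA_FROM_DEVICE transfers.
 */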

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 * Handling IOVA concatenation can come later, if needed
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		/* Un-swizzling the fields here, hence the naming mismatch */
		unsigned int s_offset = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_dma_len = s->length;

		s->offset = s_offset;
		s->length = s_length;
		sg_dma_address(s) = dma_addr + s_offset;
		dma_addr += s_dma_len;
	}
	return i;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset = sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * hiding the original data in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_offset = iova_offset(iovad, s->offset);
		size_t s_length = s->length;

		sg_dma_address(s) = s->offset;
		sg_dma_len(s) = s_length;
		s->offset -= s_offset;
		s_length = iova_align(iovad, s_length + s_offset);
		s->length = s_length;

		/*
		 * The simple way to avoid the rare case of a segment
		 * crossing the boundary mask is to pad the previous one
		 * to end at a naturally-aligned IOVA for this one's size,
		 * at the cost of potentially over-allocating a little.
		 */
		if (prev) {
			size_t pad_len = roundup_pow_of_two(s_length);

			pad_len = (pad_len - iova_len) & (pad_len - 1);
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
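
/*
 * Example (illustrative sketch, not part of the original file): helpers whose
 * signatures already match struct dma_map_ops can be plugged in directly,
 * while operations that need direction-to-prot translation or cache
 * maintenance go through thin wrappers such as "my_iommu_map_page" above.
 * "my_iommu_alloc", "my_iommu_free" and "my_iommu_map_sg" are further
 * hypothetical wrappers along the same lines:
 *
 *	static struct dma_map_ops my_iommu_dma_ops = {
 *		.alloc		= my_iommu_alloc,
 *		.free		= my_iommu_free,
 *		.map_page	= my_iommu_map_page,
 *		.unmap_page	= iommu_dma_unmap_page,
 *		.map_sg		= my_iommu_map_sg,
 *		.unmap_sg	= iommu_dma_unmap_sg,
 *		.dma_supported	= iommu_dma_supported,
 *		.mapping_error	= iommu_dma_mapping_error,
 *	};
 */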