/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
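/*
 * Illustrative arithmetic for the buffer trimming above (not part of the
 * original source): a page-aligned request of 12 KiB (three pages) has
 * get_order() == 2, so alloc_pages() hands back a contiguous block of
 * four pages.  split_page() turns it into four independent order-0 pages,
 * and the loop returns the unused fourth page to the allocator, leaving
 * exactly size >> PAGE_SHIFT pages in the buffer.
 */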
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
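/*
 * Worked example of the index macros above (illustrative, with assumed
 * values): with the default 2 MiB consistent window and the usual ARM
 * 2-level page table constants (PAGE_SHIFT == 12, PGDIR_SHIFT == 21,
 * PTRS_PER_PTE == 512), a virtual address 0x42000 bytes above
 * CONSISTENT_BASE gives CONSISTENT_OFFSET() == 0x42 (page 66 of the
 * window) and CONSISTENT_PTE_INDEX() == 0, i.e. entry 66 of
 * consistent_pte[0].
 */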
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1) + 1;
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			       gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
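/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver would pair dma_alloc_coherent() with dma_free_coherent(),
 * handing the returned bus address to the device and the CPU address
 * to the kernel.  'regs' and 'RING_BASE' are made-up names for the
 * example.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, regs + RING_BASE);	// device sees the bus address
 *	...
 *	dma_free_coherent(&pdev->dev, SZ_4K, ring, ring_dma);
 */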
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
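/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver could expose a previously allocated writecombine buffer to
 * userspace from its file_operations .mmap handler.  'struct my_dev'
 * and its fields are made-up names for the example.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *md = file->private_data;
 *
 *		return dma_mmap_writecombine(md->dev, vma, md->cpu_addr,
 *					     md->dma_handle, md->size);
 *	}
 */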
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;
				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				op(vaddr + offset, len, dir);
				kunmap_high_l1_vipt(page, saved_pte);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);
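/*
 * Illustrative usage sketch (not part of this file): drivers reach the
 * ___dma_single_xxx and ___dma_page_xxx helpers above indirectly, via
 * the streaming API in dma-mapping.h.  A hypothetical receive path
 * might look like the following ('dev', 'buf' and 'len' are made up).
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... device DMAs into the buffer ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	// CPU may now safely read buf
 */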
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					       sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
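/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver maps a scatterlist, programs each mapped segment into the
 * device, and unmaps the list once the transfer completes.  'dev',
 * 'sg', 'nents' and program_descriptor() are made up for the example;
 * note that dma_map_sg() returning 0 means the mapping failed and that
 * dma_unmap_sg() takes the original nents, not the mapped count.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sg, s, count, i)
 *		program_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */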