/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *	Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core. The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
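
/*
 * Each allocation carved out of the consistent window is tracked by a
 * ppc_vm_region on consistent_head.vm_list, kept sorted by address;
 * ppc_vm_region_alloc() does a simple first-fit walk of that list under
 * consistent_lock to find a free gap of the requested size.
 */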
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
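	 * The pages just allocated are then remapped there one by one:
	 * split_page() breaks the order-sized block into individual pages,
	 * each page is marked reserved and mapped uncached via
	 * map_kernel_page() with pgprot_noncached(), and any pages beyond
	 * the page-aligned request are handed back to the page allocator.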
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*dma_handle = phys_to_dma(dev, page_to_phys(page));

		do {
			SetPageReserved(page);
			map_kernel_page(vaddr, page_to_phys(page),
					pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}

/*
 * free a page as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}

/*
 * make an area consistent.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
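 * The buffer is therefore synced one segment at a time: the first
 * segment runs from 'offset' to the end of the first page, each later
 * segment covers at most one further page, and interrupts stay disabled
 * around the kmap_atomic()/__dma_sync()/kunmap_atomic() loop.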
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, dir);
#endif
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

/*
 * Return the PFN for a given cpu virtual address returned by arch_dma_alloc.
 */
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
		dma_addr_t dma_addr)
{
	/* This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG_ON()
	 */
	unsigned long cpu_addr = (unsigned long)vaddr;
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}
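
/*
 * Usage sketch (illustrative only): on a CONFIG_NOT_COHERENT_CACHE
 * platform a driver reaches this allocator through the generic DMA API
 * rather than by calling arch_dma_alloc() directly, e.g. with 'dev'
 * being the driver's struct device:
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
 *
 * Streaming mappings (dma_map_single() and friends) instead go through
 * arch_sync_dma_for_device()/arch_sync_dma_for_cpu() above, which flush
 * or invalidate the caches around each transfer.
 */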