/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-noncoherent.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

/*
 * walk_page_range() callback: mark one PTE as cache-inhibited and flush
 * the corresponding data out of the dcache.
 */
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

/*
 * walk_page_range() callback: clear the cache-inhibit bit again so the
 * page can be used as normal, cacheable memory.
 */
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
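
/*
 * Usage sketch (illustrative, not part of this file): drivers never
 * call arch_dma_alloc()/arch_dma_free() directly; they go through the
 * generic DMA API, which lands here on OpenRISC.  Assuming a
 * hypothetical, already-probed device 'dev':
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
 */
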
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush or invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
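
/*
 * Streaming-DMA sketch (illustrative, not part of this file):
 * arch_sync_dma_for_device() is reached via the generic streaming API.
 * Assuming a hypothetical driver-owned buffer 'buf' of 'len' bytes and
 * a device 'dev' that will read it:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...			(device DMA runs here)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */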