// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly. The rest need looking into...
 */

#include <linux/dma-noncoherent.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

/*
 * Page-walk callback: mark a single PTE as cache-inhibited and flush any
 * cached lines for that page out of the dcache so CPU and device see the
 * same contents.
 */
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

/*
 * Page-walk callback: make a single PTE cacheable again when the buffer
 * is freed.
 */
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * Note that the DMA_ATTR_NON_CONSISTENT attribute (which would hand back
 * "normal", cacheable memory) is not handled here; every allocation is
 * returned uncached.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too. All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(va, va + size, &walk));

	free_pages_exact(vaddr, size);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
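
/*
 * Illustrative sketch (not part of the original file): drivers do not call
 * arch_dma_alloc()/arch_dma_free() directly. They allocate through the
 * generic DMA API, which on OpenRISC ends up in the functions above.
 * Assuming some device 'dev' that needs a one-page coherent buffer, usage
 * would look roughly like:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... buf is an uncached CPU mapping; handle is the bus address to
 *	    program into the device ...
 *
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */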