/* arch/openrisc/kernel/dma.c (revision a520110e) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * This file implements the coherent allocate/free hooks (arch_dma_alloc,
 * arch_dma_free) and the for-device streaming sync
 * (arch_sync_dma_for_device); the remaining callbacks still need looking
 * into...
 */

#include <linux/dma-noncoherent.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}
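
/*
 * Hypothetical sketch, not part of the original file: the physical-address
 * flush loop above reappears in arch_sync_dma_for_device() below, and could
 * be factored into a shared helper along these lines (the name
 * or1k_dcache_flush_range is an assumption):
 */
static void __maybe_unused or1k_dcache_flush_range(unsigned long paddr,
						   size_t size)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/*
	 * Flush every dcache block covering [paddr, paddr + size) via the
	 * data cache block flush register, one dcache block at a time.
	 */
	for (cl = paddr; cl < paddr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}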

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * Note that the attrs argument is currently ignored here: in particular,
 * DMA_ATTR_NON_CONSISTENT is not honoured, so these allocations always
 * come back uncached rather than as "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(va, va + size, &walk));

	free_pages_exact(vaddr, size);
}
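
/*
 * Usage sketch, not part of the original file: drivers reach
 * arch_dma_alloc()/arch_dma_free() through the generic DMA API rather
 * than calling them directly. The function and variable names below are
 * assumptions for the example; dma_alloc_coherent() and friends come
 * from <linux/dma-mapping.h>.
 */
#include <linux/dma-mapping.h>

static void * __maybe_unused example_get_dma_buffer(struct device *dev,
						    size_t size,
						    dma_addr_t *handle)
{
	/*
	 * On OpenRISC this lands in arch_dma_alloc() above: the returned
	 * kernel virtual address maps the pages with _PAGE_CI set, and
	 * *handle holds the physical address for the device. Release the
	 * buffer with dma_free_coherent(dev, size, cpu_addr, *handle).
	 */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}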

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush or invalidate the cache here, as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
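
/*
 * Usage sketch, not part of the original file: how a streaming mapping
 * reaches arch_sync_dma_for_device() above. The names below are
 * assumptions for the example; dma_map_single() and friends come from
 * <linux/dma-mapping.h>, included for the earlier sketch.
 */
static int __maybe_unused example_start_tx(struct device *dev, void *buf,
					   size_t len)
{
	dma_addr_t handle;

	/*
	 * For DMA_TO_DEVICE, the generic dma-direct code calls
	 * arch_sync_dma_for_device(), which flushes the buffer out of the
	 * dcache (SPR_DCBFR) before the device reads it.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}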