/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size. */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif
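
/*
 * A worked example of the mask arithmetic above, using hypothetical
 * values (the real ones come from cpuinfo at runtime): if the cacheable
 * region spans dcache_base = 0x80000000 to dcache_high = 0x87ffffff,
 * the mask evaluates to 0x08000000. ORing a cached address with the
 * mask yields its alias in the uncached shadow region:
 *
 *	cached   = 0x80100000
 *	uncached = 0x80100000 | 0x08000000 = 0x88100000
 */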

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple: in the HW platform we can optionally
 * mirror the DDR up above the processor's cacheable region, so memory
 * accessed through this mirror region is not cached. It is allocated from
 * the same pool as normal memory, but the handle we return is shifted up
 * into the uncached region. This will no doubt cause big problems if memory
 * allocated here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * We need to ensure that there are no cachelines in use,
	 * or, worse, dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note that if the uncached shadow is not
	 * implemented, it's up to the calling code to also test that
	 * condition and make other arrangements, such as manually
	 * flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		printk(KERN_WARNING
			"ERROR: Your cache coherent area is CACHED!!!\n");

	/* The dma_handle is the same as the physical (shadowed) address. */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)vaddr);
#endif

	/*
	 * Free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache-inhibited pages. */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
EXPORT_SYMBOL(consistent_alloc);
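
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver allocating an uncached descriptor ring and programming the
 * device with the bus address. The iomem pointer and register offset
 * below are assumptions for illustration only.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = consistent_alloc(GFP_KERNEL, 8 * PAGE_SIZE, &ring_dma);
 *	if (!ring)
 *		return -ENOMEM;
 *	iowrite32((u32)ring_dma, regs + RING_BASE_REG);
 */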

/*
 * Free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear the UNCACHED_SHADOW_MASK bits in the address and free as usual. */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		ClearPageReserved(page);
		__free_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
EXPORT_SYMBOL(consistent_free);
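
/*
 * Teardown sketch matching the allocation example above (illustrative):
 * the caller must pass back the same size it gave consistent_alloc(),
 * since this function walks the area one PAGE_SIZE step at a time.
 *
 *	consistent_free(8 * PAGE_SIZE, ring);
 */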

/*
 * Make an area of memory consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert the start address back down to the unshadowed memory region. */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
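
/*
 * Illustrative sketch of syncing around a streaming DMA transfer
 * (hypothetical buffers and helpers, for illustration only): write
 * back dirty lines before the device reads a buffer, and invalidate
 * before the CPU reads data the device wrote.
 *
 *	consistent_sync(tx_buf, tx_len, PCI_DMA_TODEVICE);
 *	start_dma_tx(dev, tx_dma, tx_len);
 *	...
 *	consistent_sync(rx_buf, rx_len, PCI_DMA_FROMDEVICE);
 */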

/*
 * consistent_sync_page() makes memory consistent. It is identical
 * to consistent_sync(), but takes a struct page instead of a
 * virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;

	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
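
/*
 * Page-based sketch (hypothetical names): syncing a fragment described
 * by a page and offset, e.g. one element of a scatter list, before the
 * device reads it:
 *
 *	consistent_sync_page(frag_page, frag_offset, frag_len,
 *			     PCI_DMA_TODEVICE);
 */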