/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The functions return
 * the virtual address and set 'dma_handle' to the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

/*
 * This spinlock protects allocations from the uncached, DMA consistent
 * mapping region defined above.
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

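	/*
	 * First-fit search: the list is kept sorted by address, so walk it
	 * and take the first gap large enough to hold 'size' bytes; the
	 * (addr + size) < addr test guards against address wrap-around.
	 */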
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

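/*
 * Find the region that starts at 'addr'.  The caller must hold
 * consistent_lock around the lookup and any use of the result.
 */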
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
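	/*
	 * For a mask of contiguous low bits, (mask + 1) & ~mask is the size
	 * of the address range the device can reach (a 24-bit mask gives a
	 * 16MB limit); an all-ones 64-bit mask yields 0, meaning no limit.
	 */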
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#llx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

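		/*
		 * Split the high-order allocation into single pages so the
		 * tail pages beyond 'size' can be returned to the page
		 * allocator below.
		 */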
		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

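		/*
		 * Map each page uncached at its slot in the consistent
		 * region and mark it reserved for the lifetime of the
		 * mapping.
		 */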
		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);

/*
 * Free a region previously allocated by __dma_alloc_coherent() above.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
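	/*
	 * Walk the kernel page tables covering the region, clearing each
	 * PTE and freeing the page that was mapped there.
	 */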
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

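	/*
	 * Interrupts stay off for the whole walk so the KM_PPC_SYNC_PAGE
	 * kmap_atomic slot cannot be reused from interrupt context while a
	 * segment is mapped here.
	 */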
	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(),
 * but it takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);