/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

/* Preallocate the dma-debug entry pool before drivers start using the DMA API. */
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);

void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	/*
	 * Split the high-order allocation into independent order-0 pages so
	 * that dma_free_coherent() can release them one page at a time.
	 */
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	/* The allocation was split into single pages; free them one by one. */
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);
	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);

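/*
 * Illustrative only: a minimal sketch of how a driver might pair the two
 * helpers above. The function, device and size names (my_probe,
 * MY_RING_BYTES) are hypothetical and not part of this file.
 */
#if 0
static int my_probe(struct platform_device *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Returns an uncached CPU mapping; the bus address lands in ring_dma. */
	ring = dma_alloc_coherent(&pdev->dev, MY_RING_BYTES,
				  &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program the device with ring_dma, access ring from the CPU ... */

	dma_free_coherent(&pdev->dev, MY_RING_BYTES, ring, ring_dma);
	return 0;
}
#endif
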
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	/* Operate on the cached P1 segment alias of the buffer. */
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

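/*
 * Illustrative only: how the three directions above typically pair with a
 * streaming transfer. dev, buf, len and my_start_dma() are hypothetical.
 */
#if 0
	/* CPU filled buf and the device will read it: write back dirty lines. */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	my_start_dma(dev);

	/* Device wrote buf and the CPU will read it: invalidate stale lines. */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
#endif
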
/*
 * The __setup() hook only claims "memchunk." options so they are treated
 * as handled; the actual parsing is done by memchunk_cmdline_override()
 * below, directly against boot_command_line.
 */
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

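/*
 * Illustrative only: with the parser above, booting with an argument such
 * as "memchunk.veu=4m" overrides the memsize a board passes to
 * platform_resource_setup_memory() for the device named "veu" ("veu" is
 * just an example name). memparse() accepts the usual k/m/g suffixes, and
 * "memchunk.veu=0" skips the allocation entirely.
 */
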
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	/* The caller must leave the final resource slot empty for us. */
	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
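
/*
 * Illustrative only: a board-setup sketch for the helper above. The device
 * name, resource count and 1 MiB size are hypothetical; the one hard
 * requirement is that the final resource slot starts out empty
 * (flags == 0) so the helper can claim it.
 */
#if 0
static struct resource mydev_resources[2];	/* last entry left empty */

static struct platform_device mydev_device = {
	.name		= "mydev",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(mydev_resources),
	.resource	= mydev_resources,
};

static int __init myboard_devices_setup(void)
{
	platform_resource_setup_memory(&mydev_device, "mydev", 1 << 20);
	return platform_device_register(&mydev_device);
}
device_initcall(myboard_devices_setup);
#endif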