xref: /openbmc/linux/arch/ia64/kernel/uncached.c (revision 615c36f5)
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

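/*
 * Cap on how many granules of cached memory may be converted to uncached
 * per node, so the allocator cannot consume a node's cacheable memory
 * without bound once the EFI spill pages are exhausted.
 */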
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];


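/*
 * The two IPI handlers below run on every other CPU via smp_call_function().
 * Since such callbacks return void, any failure is reported back through
 * the pool's status counter instead of a return value.
 */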
static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add the new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

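	/*
	 * c_addr is the granule's address in the kernel's cached identity
	 * mapping (PAGE_OFFSET); rebasing it onto __IA64_UNCACHED_OFFSET
	 * yields the same physical pages seen through the uncached
	 * identity mapping.
	 */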
	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

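	/*
	 * Ask PAL to make prefetch visibility physical on this CPU. If it
	 * reports that remote CPUs need the same change, broadcast the PAL
	 * call via IPI and pick up any failure from uc_pool->status.
	 */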
	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

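	/*
	 * Drain outstanding memory transactions, first locally and then on
	 * every other CPU via IPI, so no cached references to the granule
	 * remain in flight.
	 */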
	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on
 * the requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
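		/*
		 * Retry the allocation, converting another granule of
		 * cached memory after each failure, until it succeeds or
		 * this node cannot accept any more converted chunks.
		 */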
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

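	/*
	 * The uppermost address bits select the ia64 region; an address
	 * outside the uncached identity region cannot have come from
	 * uncached_alloc_page().
	 */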
	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
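

/*
 * Illustrative pairing of the two exported entry points above. This sketch
 * (including its -ENOMEM policy) is hypothetical and not a caller that
 * exists in the tree; a negative starting_nid means "begin on the calling
 * CPU's node", and a return of 0 means the allocation failed:
 *
 *	unsigned long uc_addr = uncached_alloc_page(-1, 1);
 *
 *	if (uc_addr == 0)
 *		return -ENOMEM;
 *	... touch the page only through the uncached address uc_addr ...
 *	uncached_free_page(uc_addr, 1);
 */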


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL is passed on the call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

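	/*
	 * Zeroing a whole chunk through its uncached mapping can take a
	 * while; poke the watchdog so the delay is not mistaken for a
	 * soft lockup.
	 */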
	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);