xref: /openbmc/linux/arch/s390/mm/vmem.c (revision 64c70b1c)
/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

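/*
 * End address of the vmalloc area; the virtual mem_map array starts right
 * at this address (see vmem_map_init()).
 */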
unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);

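/*
 * vmem_map is the base of the virtually mapped mem_map array; vmem_mutex
 * serializes all changes to the memory segment list and its mappings.
 */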
static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);

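/*
 * A physical memory range tracked on the mem_segs list; this covers both the
 * memory chunks detected at boot and segments added via add_shared_memory().
 */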
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

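/*
 * Initialize the struct pages of a zone chunk by chunk so that only mem_map
 * ranges backed by real memory are touched; each chunk's slice of the
 * mem_map is rounded out to whole pages before it is handed to
 * memmap_init_zone().
 */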
void memmap_init(unsigned long size, int nid, unsigned long zone,
		 unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

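/*
 * Get pages from the page allocator once the slab allocator is up,
 * otherwise fall back to the bootmem allocator.
 */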
static inline void *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

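/* Allocate a pmd table and clear all of its entries. */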
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;
	int i;

	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
	if (!pmd)
		return NULL;
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear_kernel(pmd + i);
	return pmd;
}

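/* Allocate a page table and mark all of its entries as empty. */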
static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte;
	pte_t empty_pte;
	int i;

	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
	if (!pte)
		return NULL;
	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = empty_pte;
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir))
			continue;
		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Back the part of the virtual mem_map array that describes the given
 * physical memory range with real pages.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	map_start = vmem_map + PFN_DOWN(start);
	map_end = vmem_map + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
		}

		pm_dir = pmd_offset(pg_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

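		/* Back this page of the virtual mem_map if nothing is mapped here yet. */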
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

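/*
 * Map a physical memory range 1:1 and make sure the corresponding part of
 * the virtual mem_map array is backed.
 */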
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_range(start, size);
	if (ret)
		return ret;
	return vmem_add_mem_map(start, size);
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

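/*
 * Find the segment that exactly matches [start, start + size), take it off
 * the segment list, invalidate its 1:1 mapping and free its descriptor.
 */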
int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

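/*
 * Register a new memory segment, add it to the 1:1 mapping, back its part of
 * the virtual mem_map and initialize the corresponding struct pages.
 */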
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

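	/*
	 * Clear the struct pages for the new range and mark each page
	 * reserved with an initial reference count.
	 */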
	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping) and
 * carve the virtual mem_map array out of the top of the vmalloc area.
 */
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);