xref: /openbmc/linux/arch/s390/mm/vmem.c (revision 643d1f7f)
/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

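/*
 * vmem_mutex serializes updates to the identity mapping and to the
 * memory segment list (mem_segs) below.
 */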
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

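/*
 * Initialize only those struct pages that are backed by a real memory
 * chunk: each chunk is clamped to this zone's [start_pfn, start_pfn + size)
 * range, widened to full pages of the mem_map and then handed to
 * memmap_init_zone().  Holes between chunks are left untouched.
 */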
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}

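/*
 * Allocate pages for page tables and the mem_map.  As long as the slab
 * allocator is not yet available the bootmem allocator is used instead.
 */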
static void __init_refok *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

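/*
 * With the page table layout used here pgd_none() never reports an
 * empty entry, so vmem_pud_alloc() is not expected to be called at all;
 * BUG() documents that assumption.
 */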
#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })

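/*
 * Allocate and clear a pmd (segment) table, which takes four pages on
 * 64 bit.  Without CONFIG_64BIT nothing is allocated and NULL is
 * returned.
 */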
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
#endif
	return pmd;
}

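/*
 * Allocate a page table page and mark all of its entries empty.
 */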
static inline pte_t *vmem_pte_alloc(void)
{
	pte_t *pte = vmem_alloc_pages(0);

	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Back the part of the virtual mem_map array that describes the given
 * memory range with real, mapped pages.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	map_start = VMEM_MAP + PFN_DOWN(start);
	map_end = VMEM_MAP + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

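/*
 * Make a memory range usable: back its piece of the virtual mem_map
 * first, then add the range itself to the identity mapping.
 */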
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_mem_map(start, size);
	if (ret)
		return ret;
	return vmem_add_range(start, size);
}

/*
 * Add a memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMALLOC_START ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove a memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

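/*
 * Remove a segment from the segment list and invalidate its identity
 * mapping.  The caller must hold vmem_mutex.
 */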
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

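/*
 * Remove a previously added shared memory segment.  The range must match
 * an existing segment in both start and size, otherwise -ENOENT is
 * returned.
 */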
int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

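/*
 * Add a shared memory segment: record it in the segment list, map it
 * into the identity mapping and the virtual mem_map, and initialize its
 * struct pages as reserved.
 */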
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
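
/*
 * Usage sketch (not taken from this file): a caller, for example the
 * DCSS segment driver, would be expected to pair the two interfaces
 * roughly like this, with seg_start and seg_size being hypothetical
 * values obtained elsewhere:
 *
 *	ret = add_shared_memory(seg_start, seg_size);
 *	if (ret)
 *		return ret;
 *	...
 *	remove_shared_memory(seg_start, seg_size);
 */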

/*
 * Map the whole physical memory into the kernel's virtual address space
 * (identity mapping).  Enough space is reserved in the vmalloc area for
 * the vmemmap so that additional memory segments can be hotplugged
 * later.
 */
void __init vmem_map_init(void)
{
	int i;

	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
	NODE_DATA(0)->node_mem_map = VMEM_MAP;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}

/*
 * Convert the memory chunk array to a memory segment list so that there
 * is a single list containing both r/w memory and shared memory
 * segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

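/*
 * Registered as a core_initcall; by then the slab allocator is available
 * for the kzalloc() calls above.
 */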
core_initcall(vmem_convert_memory_chunk);
394