/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

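/*
 * Use the buddy allocator once the slab allocator is up; fall back to
 * bootmem during early boot.
 */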
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

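/*
 * A region-third (pud) table has 2048 eight-byte entries and thus spans
 * four pages; on 31-bit kernels this level is folded and never allocated.
 */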
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

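/*
 * Segment (pmd) tables use the same four-page layout as region tables.
 */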
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

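/*
 * Allocate a page table with all entries marked invalid, either from the
 * page table allocator of init_mm or, early in boot, from bootmem.
 */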
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

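		/*
		 * Use a large page to map a whole segment when the machine
		 * supports it, the address is segment aligned, a full
		 * segment still fits into the range, and it is not the
		 * first segment.
		 */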
#ifdef CONFIG_64BIT
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

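		/* A huge pmd maps a full segment; clear it and skip the rest of it. */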
		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Back a range of the virtual mem_map array with real, mapped pages.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
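		/* Allocate a backing page if none is mapped here yet. */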
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

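	/* Reject segments beyond the 1:1 mapping limit or that wrap around. */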
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
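	/* Look for a segment that matches start and size exactly. */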
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

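/*
 * Add a new memory segment and create its 1:1 mapping.
 */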
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hot-plugged later.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

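	/*
	 * Kernel text and read-only data, from _stext up to _eshared,
	 * gets mapped read-only; chunks straddling that range are split.
	 */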
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);
387