xref: /openbmc/linux/arch/s390/mm/vmem.c (revision 82003e04)
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

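/*
 * Allocate pages for page table structures. Before the slab allocator
 * is up (slab_is_available()), memory comes from the bootmem allocator;
 * afterwards it comes from the buddy allocator via __get_free_pages().
 * The __ref annotation allows the reference to the init-time bootmem
 * allocator without triggering a section mismatch warning.
 */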
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_align(size, size);
}

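/*
 * vmem_pud_alloc() and vmem_pmd_alloc() allocate a region-third resp.
 * segment table. On s390 these tables are four pages (16KB) holding
 * 2048 eight-byte entries, hence the order-2 allocation. All entries
 * are initialized to the architecture's "empty" value, so the hardware
 * treats them as invalid until they are populated.
 */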
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

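/*
 * Allocate a page table for the kernel mapping. A page table on s390
 * holds PTRS_PER_PTE (256) eight-byte entries, i.e. 2KB. Once the slab
 * allocator is available the table comes from the common
 * page_table_alloc() pool of init_mm; before that it is taken from
 * bootmem. All entries start out as _PAGE_INVALID.
 */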
pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
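/*
 * The range is mapped with the largest page size the machine supports:
 * 2GB region-third entries if EDAT2 is available, 1MB segment entries
 * if EDAT1 is available, and 4KB page table entries otherwise. A large
 * mapping is only used when the address is suitably aligned
 * (!(address & ~PUD_MASK) resp. !(address & ~PMD_MASK)) and the rest of
 * the range still covers a full large page; with debug_pagealloc
 * enabled everything stays at 4KB granularity. The pages4k/1m/2g
 * counters feed the per-size direct mapping statistics via
 * update_page_count(), which show up as the DirectMap lines in
 * /proc/meminfo.
 */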
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
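/*
 * The walk mirrors vmem_add_mem(): missing upper-level entries are
 * skipped by advancing a whole PGDIR/PUD/PMD at a time, large mappings
 * are cleared as a whole, and 4KB mappings are cleared per page. The
 * page tables themselves are left allocated. A single TLB flush for the
 * complete range is done at the end, and the direct mapping counters
 * are decremented accordingly.
 */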
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
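/*
 * vmemmap_populate() backs the struct page array for the range
 * [start, end) in the virtual vmemmap region. It is called once per
 * memory section. With EDAT1 the backing storage is allocated in 1MB
 * chunks and mapped with large segment entries; otherwise ordinary 4KB
 * pages are allocated and mapped through page tables.
 */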
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used. Otherwise we would also end up
			 * with page tables, since vmemmap_populate() gets
			 * called for each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

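/*
 * vmemmap_free() is a no-op here: the vmemmap mapping and its backing
 * pages are simply left in place when memory is removed.
 */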
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
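/*
 * A segment is rejected with -ERANGE if it extends beyond VMEM_MAX_PHYS
 * or if start + size wraps around. Two segments [s1, e1) and [s2, e2)
 * overlap iff s1 < e2 && s2 < e1; the two "continue" statements below
 * skip exactly the non-overlapping cases, so any entry that falls
 * through is an overlap and yields -ENOSPC. The caller must hold
 * vmem_mutex.
 */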
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

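/*
 * Remove a mapping that was previously established with
 * vmem_add_mapping(). The range must match an existing segment exactly
 * (same start and size); otherwise -ENOENT is returned. Used for
 * example by the DCSS code in arch/s390/mm/extmem.c.
 */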
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

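/*
 * Create a 1:1 mapping for a memory range that lies outside the regions
 * mapped at boot, e.g. for a DCSS segment. The segment is first
 * registered in mem_segs (which also checks for overlaps) and only then
 * mapped; on failure the registration is rolled back.
 *
 * Illustrative use only (error labels are the caller's own):
 *
 *	rc = vmem_add_mapping(seg_start, seg_size);
 *	if (rc)
 *		goto out_error;
 *	...
 *	vmem_remove_mapping(seg_start, seg_size);
 */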
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into the kernel address space
 * (identity mapping). We reserve enough space in the vmalloc area for
 * the vmemmap so that additional memory segments can be hotplugged.
 */
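/*
 * Every memblock region present at boot is added to the 1:1 mapping.
 * Afterwards the kernel image from _stext up to _eshared (text and
 * read-only data) is write protected via set_memory_ro().
 */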
void __init vmem_map_init(void)
{
	unsigned long size = _eshared - _stext;
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
	pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
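/*
 * Runs at core_initcall time, i.e. once the slab allocator is
 * available, so that the segment structures can be kzalloc'ed. From
 * then on mem_segs describes both the boot memory and anything added
 * later via vmem_add_mapping().
 */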
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);