xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 72ce7780)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

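/*
 * With "vm.allocate_pgste" set, page tables are allocated as full 4K
 * pages with page status table extensions (PGSTEs). KVM needs PGSTEs to
 * keep guest storage keys and per-pte state, so user space can request
 * this for its processes, e.g. with "echo 1 > /proc/sys/vm/allocate_pgste"
 * before the affected processes are started.
 */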
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

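/*
 * Region and segment tables (crst tables) have 2048 entries of 8 bytes
 * each, i.e. 16K, hence the order-2 allocation. The kernel runs with a
 * 1:1 mapping at this point, so page_to_phys() of the allocated page
 * doubles as the virtual table pointer.
 */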
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

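/*
 * Reload the ASCE on one cpu after an upgrade. Run on every cpu via
 * on_each_cpu() so that no cpu keeps creating TLB entries for the old,
 * smaller top-level table.
 */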
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* we must change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		if (current->thread.mm_segment == USER_DS) {
			__ctl_load(S390_lowcore.user_asce, 1, 1);
			/* Mark user-ASCE present in CR1 */
			clear_cpu_flag(CIF_ASCE_PRIMARY);
		}
		if (current->thread.mm_segment == USER_DS_SACF) {
			__ctl_load(S390_lowcore.user_asce, 7, 7);
			/* enable_sacf_uaccess does all or nothing */
			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
		}
	}
	__tlb_flush_local();
}

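/*
 * Grow the top level of the page table hierarchy so that addresses up
 * to @end become mappable: going from 3 to 4 levels installs a region-2
 * table as the new p4d, going from 4 to 5 levels installs a region-1
 * table as the new pgd. Both steps may happen in a single call.
 */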
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_sem held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

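/*
 * A 4K page can hold two 2K page table fragments. Their allocation
 * state is tracked in the upper byte of page->_refcount: bits 24 and 25
 * mark the lower/upper 2K half as in use, bits 28 and 29 mark a half as
 * pending removal via page_table_free_rcu(). atomic_xor_bits() toggles
 * a set of these bits and returns the new value atomically.
 */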
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

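/*
 * A pgste page table uses the full 4K page: the lower 2K hold the ptes
 * (set to invalid), the upper 2K hold the corresponding page status
 * table entries (zeroed).
 */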
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
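/*
 * An s390 page table has 256 entries of 8 bytes and thus occupies only
 * 2K. Unless PGSTEs are required, two page tables are packed into one
 * 4K page; pages with a free half are kept on the per-mm pgtable_list
 * so that the second half can be handed out later.
 */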
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

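/*
 * Free a page table immediately. A 2K fragment only releases its page
 * once both halves are unused; until then the page stays on (or returns
 * to) the pgtable_list so the remaining half can be reused.
 */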
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}

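/*
 * Defer freeing a page table until the TLBs have been flushed on all
 * cpus. The fragment to release is encoded in the low two bits of the
 * table pointer passed to tlb_remove_table(); the "pending removal" bit
 * in page->_refcount keeps the half reserved until __tlb_remove_table()
 * actually runs.
 */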
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

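/*
 * Final stage of the deferred free: the low two bits of the table
 * pointer distinguish crst tables (0), the lower/upper 2K pte fragment
 * (1/2), and a full 4K pgste page table (3).
 */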
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		fallthrough;
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

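/*
 * Compute the end of the range covered by the current entry at a given
 * table level, capped at the overall end. Comparing on "value - 1"
 * makes an end address of 0 act as the top of the address space rather
 * than the bottom.
 */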
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

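/*
 * Translate a virtual address to its real address with the
 * load-real-address instruction, using the currently attached asce.
 * The result is used to make the base tables map each address to the
 * same real page as the normal kernel mapping.
 */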
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

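/*
 * The base_*_walk() functions below share one pattern: iterate over the
 * entries of one table level for [addr, end) and recurse into the next
 * lower level. With alloc set, missing lower-level tables are allocated
 * and the ptes are filled in; with alloc clear, the same walk tears the
 * tables down again.
 */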
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

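/*
 * Create the kmem cache for 2K base page tables on first use. The mutex
 * serializes concurrent first callers; later calls return quickly via
 * the unlocked check.
 */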
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page
 * tables, that can be used to access the virtual kernel mapping. Unlike
 * the regular kernel asce, the returned asce does not make use of any
 * enhanced DAT features such as large pages. This is required for some
 * I/O functions that pass an asce, such as some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}

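/*
 * Example usage (a sketch, not taken from this file): a caller that
 * needs a DAT-feature-free asce for a buffer it wants to hand to a
 * service call interface could do:
 *
 *	unsigned long asce;
 *	int rc;
 *
 *	asce = base_asce_alloc((unsigned long) buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_service_call(asce, ...);
 *	base_asce_free(asce);
 *
 * where issue_service_call() is a hypothetical stand-in for whatever
 * interface consumes the asce. The asce must never be loaded into a
 * control register; see the note above base_asce_alloc().
 */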