xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 3ddc8b84)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
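
/*
 * The sysctl above surfaces as /proc/sys/vm/allocate_pgste. When set,
 * page_table_alloc() below hands out full 4K page tables with PGSTEs,
 * which running KVM guests requires. A hedged usage sketch (shell, not
 * part of this file):
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 */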

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);

	if (!ptdesc)
		return NULL;
	arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
	return (unsigned long *) ptdesc_to_virt(ptdesc);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}

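/*
 * Runs on every CPU via on_each_cpu() from crst_table_upgrade(): a CPU
 * currently running with @mm switches over to the upgraded ASCE, and each
 * CPU flushes its local TLB so no translations through the old top-level
 * table survive.
 */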
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that ever changes, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

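/*
 * atomic_fetch_xor() returns the value *before* the update; XORing that
 * with @bits once more reconstructs the value *after* the update, so
 * callers always see the new state of the mask.
 */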
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	return atomic_fetch_xor(bits, v) ^ bits;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		arch_set_page_dat(virt_to_page(table), 0);
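		/* lower 2K: invalid PTEs; upper 2K: the PGSTE area, zeroed */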
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc_page(ptdesc);
}

void page_table_free_pgste(struct page *page)
{
	pagetable_free(page_ptdesc(page));
}

#endif /* CONFIG_PGSTE */

/*
 * A 2KB-pgtable is either the upper or the lower half of a normal page.
 * The second half of the page may be unused or used as another
 * 2KB-pgtable.
 *
 * Whenever possible the parent page for a new 2KB-pgtable is picked
 * from the list of partially allocated pages mm_context_t::pgtable_list.
 * In case the list is empty a new parent page is allocated and added to
 * the list.
 *
 * When a parent page gets fully allocated it contains 2KB-pgtables in both
 * upper and lower halves and is removed from mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a fully allocated parent page, that
 * page turns partially allocated and is added to mm_context_t::pgtable_list.
 *
 * If a 2KB-pgtable is freed from a partially allocated parent page, that
 * page turns unused and gets removed from mm_context_t::pgtable_list.
 * Furthermore, the unused parent page is released.
 *
 * As follows from the above, no unallocated or fully allocated parent
 * pages are contained in mm_context_t::pgtable_list.
 *
 * The upper byte (bits 24-31) of the parent page _refcount is used
 * for tracking contained 2KB-pgtables and has the following format:
 *
 *   PP  AA
 * 01234567    upper byte (bits 24-31) of struct page::_refcount
 *   ||  ||
 *   ||  |+--- upper 2KB-pgtable is allocated
 *   ||  +---- lower 2KB-pgtable is allocated
 *   |+------- upper 2KB-pgtable is pending removal
 *   +-------- lower 2KB-pgtable is pending removal
 *
 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
 * using _refcount is possible).
 *
 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still unallocated;
 *   - removed from mm_context_t::pgtable_list in case both halves of the
 *     parent page are allocated;
 * These operations are protected with mm_context_t::lock.
 *
 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
 * and the corresponding PP bit is set to 1 in a single atomic operation.
 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
 * exclusive and may never be both set to 1!
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still allocated;
 *   - removed from mm_context_t::pgtable_list in case the second half of
 *     the parent page is unallocated;
 * These operations are protected with mm_context_t::lock.
 *
 * It is important to understand that mm_context_t::lock only protects
 * mm_context_t::pgtable_list and the AA bits, but not the parent page
 * itself and the PP bits.
 *
 * Releasing the parent page happens whenever a PP bit turns from 1 to 0
 * while both AA bits and the second PP bit are already unset. The parent
 * page then no longer contains any 2KB-pgtable fragment and has already
 * been removed from mm_context_t::pgtable_list, so it is safe to release
 * the page.
 *
 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable,
 * while the PP bits are never used, nor is such a page added to or removed
 * from mm_context_t::pgtable_list.
 *
 * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
 * and prevents both 2K fragments from being reused. pte_free_defer() has to
 * guarantee that its pgtable cannot be reused before the RCU grace period
 * has elapsed (which page_table_free_rcu() does not actually guarantee).
 * But for simplicity, because page->rcu_head overlays page->lru, and because
 * the RCU callback might not be called before the mm_context_t has been freed,
 * pte_free_defer() in this implementation prevents both fragments from being
 * reused, and delays making the call to RCU until both fragments are freed.
 */
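
/*
 * A worked example of the _refcount upper byte (a sketch derived from the
 * functions below, not an exhaustive list of states):
 *
 *   0x00  fresh page, no fragment allocated
 *   0x01  lower 2K allocated          (page_table_alloc: byte ^= 0x01)
 *   0x03  upper 2K also allocated     (page_table_alloc: byte ^= 0x02)
 *   0x12  lower 2K freed, pending     (page_table_free:  byte ^= 0x11)
 *   0x02  lower pending bit cleared   (page_table_free:  byte ^= 0x10)
 *   0x20  upper 2K freed, pending     (page_table_free:  byte ^= 0x22)
 *   0x00  upper pending bit cleared -> parent page released via
 *         pte_free_now()              (page_table_free:  byte ^= 0x20)
 */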
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct ptdesc *ptdesc;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			ptdesc = list_first_entry(&mm->context.pgtable_list,
						struct ptdesc, pt_list);
			mask = atomic_read(&ptdesc->_refcount) >> 24;
			/*
			 * The pending removal bits must also be checked.
			 * Failure to do so might lead to an impossible
			 * value (e.g. 0x13 or 0x23) being written to
			 * _refcount. Such values violate the assumption
			 * that pending and allocation bits are mutually
			 * exclusive, and the rest of the code derails as
			 * a result. That could lead to a whole bunch of
			 * races and corruptions. Folding the pending bits
			 * onto the allocation bits below treats e.g. mask
			 * 0x21 (lower allocated, upper pending) as 0x03,
			 * so such a page is skipped.
			 */
			mask = (mask | (mask >> 4)) & 0x03U;
			if (mask != 0x03U) {
				table = (unsigned long *) ptdesc_to_virt(ptdesc);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&ptdesc->_refcount,
							0x01U << (bit + 24));
				list_del_init(&ptdesc->pt_list);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	arch_set_page_dat(ptdesc_page(ptdesc), 0);
	/* Initialize page table */
	table = (unsigned long *) ptdesc_to_virt(ptdesc);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		INIT_LIST_HEAD(&ptdesc->pt_list);
		atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

static void page_table_release_check(struct page *page, void *table,
				     unsigned int half, unsigned int mask)
{
	char msg[128];

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;
	if (!mask && list_empty(&page->lru))
		return;
	snprintf(msg, sizeof(msg),
		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
		 table, half, mask);
	dump_page(page, msg);
}

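/*
 * RCU callback used by page_table_free() and __tlb_remove_table(): since
 * ptdesc->pt_rcu_head overlays ptdesc->pt_list, the ptdesc must already be
 * off mm_context_t::pgtable_list by the time this gets scheduled.
 */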
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned int mask, bit, half;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		/*
		 * Mark the page for delayed release. The actual release
		 * will happen outside of the critical section from this
		 * function or from __tlb_remove_table().
		 */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
			/*
			 * Other half is allocated, and neither half has had
			 * its free deferred: add page to head of list, to make
			 * this freed half available for immediate reuse.
			 */
			list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		} else {
			/* If page is on list, now remove it. */
			list_del_init(&ptdesc->pt_list);
		}
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		half = 0x01U << bit;
	} else {
		half = 0x03U;
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned int bit, mask;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	mm = tlb->mm;
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 0x03U);
		tlb_remove_ptdesc(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/*
	 * Mark the page for delayed release. The actual release will happen
	 * outside of the critical section from __tlb_remove_table() or from
	 * page_table_free().
	 */
	mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
		/*
		 * Other half is allocated, and neither half has had
		 * its free deferred: add page to end of list, to make
		 * this freed half available for reuse once its pending
		 * bit has been cleared by __tlb_remove_table().
		 */
		list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
	} else {
		/* If page is on list, now remove it. */
		list_del_init(&ptdesc->pt_list);
	}
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
	tlb_remove_table(tlb, table);
}

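/*
 * Page tables are naturally 4K-aligned, so the two low bits of the table
 * pointer are free; page_table_free_rcu() uses them to tell this function
 * what is being removed: 0x01 lower 2K fragment, 0x02 upper 2K fragment,
 * 0x03 a full 4K table with PGSTEs, 0x00 a CRST table.
 */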
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	switch (half) {
	case 0x00U:	/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	case 0x01U:	/* lower 2K of a 4K page table */
	case 0x02U:	/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		break;
	case 0x03U:	/* 4K page table with pgstes */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
		break;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = virt_to_page(pgtable);
	SetPageActive(page);
	page_table_free(mm, (unsigned long *)pgtable);
	/*
	 * page_table_free() does not do the pgste gmap_unlink() which
	 * page_table_free_rcu() does: warn us if pgste ever reaches here.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);

	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}
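
/*
 * Each generated helper rounds addr up to the next SIZE boundary and clamps
 * the result to end. Comparing (next - 1) < (end - 1) instead of next < end
 * keeps the clamp correct when next wraps to 0 at the very top of the
 * address space.
 */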

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

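/*
 * LRA (load real address) translates @address through the DAT tables
 * currently attached to the CPU and returns the resulting real address.
 * The base page tables get filled with these translations of the kernel
 * mapping below.
 */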
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

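/*
 * The walkers below serve double duty: with alloc set they populate the
 * base tables top-down, allocating lower-level tables on demand; with
 * alloc clear they walk an existing hierarchy and free every lower-level
 * table, which is how base_asce_free() tears everything down.
 */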
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

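/*
 * Double-checked locking: the unlocked test keeps the common case cheap,
 * while the re-test under the mutex ensures the cache is created only once.
 */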
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference to
 * a normal asce is that the returned asce does not make use of any enhanced
 * DAT features like large pages. This is required for some I/O functions
 * that pass an asce, e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any CPU. It may only be
 *	 used for I/O requests. TLB entries that might result because the
 *	 asce was attached to a CPU won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
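
/*
 * A hedged usage sketch (caller code, not part of this file; buf and
 * num_pages are hypothetical names): allocate a basic asce for an I/O
 * buffer and release it again when done.
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce along with the I/O request ...
 *	base_asce_free(asce);
 */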
758