xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 2ae1beb3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Page table allocation functions
4  *
5  *    Copyright IBM Corp. 2016
6  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  */
8 
9 #include <linux/sysctl.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <asm/mmu_context.h>
13 #include <asm/pgalloc.h>
14 #include <asm/gmap.h>
15 #include <asm/tlb.h>
16 #include <asm/tlbflush.h>
17 
18 #ifdef CONFIG_PGSTE
19 
20 int page_table_allocate_pgste = 0;
21 EXPORT_SYMBOL(page_table_allocate_pgste);
22 
23 static struct ctl_table page_table_sysctl[] = {
24 	{
25 		.procname	= "allocate_pgste",
26 		.data		= &page_table_allocate_pgste,
27 		.maxlen		= sizeof(int),
28 		.mode		= S_IRUGO | S_IWUSR,
29 		.proc_handler	= proc_dointvec_minmax,
30 		.extra1		= SYSCTL_ZERO,
31 		.extra2		= SYSCTL_ONE,
32 	},
33 	{ }
34 };
35 
36 static int __init page_table_register_sysctl(void)
37 {
38 	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
39 }
40 __initcall(page_table_register_sysctl);
41 
42 #endif /* CONFIG_PGSTE */
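
/*
 * The sysctl registered above is exposed as /proc/sys/vm/allocate_pgste and
 * clamped to the range 0..1 by proc_dointvec_minmax(). A minimal userspace
 * sketch for enabling it (assuming sufficient privileges):
 *
 *	int fd = open("/proc/sys/vm/allocate_pgste", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */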
43 
44 unsigned long *crst_table_alloc(struct mm_struct *mm)
45 {
46 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
47 
48 	if (!ptdesc)
49 		return NULL;
50 	arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
51 	return (unsigned long *) ptdesc_to_virt(ptdesc);
52 }
53 
54 void crst_table_free(struct mm_struct *mm, unsigned long *table)
55 {
56 	if (!table)
57 		return;
58 	pagetable_free(virt_to_ptdesc(table));
59 }
60 
61 static void __crst_table_upgrade(void *arg)
62 {
63 	struct mm_struct *mm = arg;
64 
65 	/* change all active ASCEs to avoid the creation of new TLB entries */
66 	if (current->active_mm == mm) {
67 		S390_lowcore.user_asce = mm->context.asce;
68 		__ctl_load(S390_lowcore.user_asce, 7, 7);
69 	}
70 	__tlb_flush_local();
71 }
72 
73 int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
74 {
75 	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
76 	unsigned long asce_limit = mm->context.asce_limit;
77 
78 	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
79 	VM_BUG_ON(asce_limit < _REGION2_SIZE);
80 
81 	if (end <= asce_limit)
82 		return 0;
83 
84 	if (asce_limit == _REGION2_SIZE) {
85 		p4d = crst_table_alloc(mm);
86 		if (unlikely(!p4d))
87 			goto err_p4d;
88 		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
89 	}
90 	if (end > _REGION1_SIZE) {
91 		pgd = crst_table_alloc(mm);
92 		if (unlikely(!pgd))
93 			goto err_pgd;
94 		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
95 	}
96 
97 	spin_lock_bh(&mm->page_table_lock);
98 
99 	/*
100 	 * This routine gets called with mmap_lock held and there is no
101 	 * reason to optimize for the case where it is not held. However,
102 	 * if that ever changes, the check below will let us know.
103 	 */
104 	VM_BUG_ON(asce_limit != mm->context.asce_limit);
105 
106 	if (p4d) {
107 		__pgd = (unsigned long *) mm->pgd;
108 		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
109 		mm->pgd = (pgd_t *) p4d;
110 		mm->context.asce_limit = _REGION1_SIZE;
111 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
112 			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
113 		mm_inc_nr_puds(mm);
114 	}
115 	if (pgd) {
116 		__pgd = (unsigned long *) mm->pgd;
117 		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
118 		mm->pgd = (pgd_t *) pgd;
119 		mm->context.asce_limit = TASK_SIZE_MAX;
120 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
121 			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
122 	}
123 
124 	spin_unlock_bh(&mm->page_table_lock);
125 
126 	on_each_cpu(__crst_table_upgrade, mm, 0);
127 
128 	return 0;
129 
130 err_pgd:
131 	crst_table_free(mm, p4d);
132 err_p4d:
133 	return -ENOMEM;
134 }
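
/*
 * A minimal usage sketch (hypothetical caller): code that wants to extend
 * the user address space up to "end" checks the current limit and asks for
 * an upgrade; crst_table_upgrade() itself also returns 0 early when end
 * still fits below mm->context.asce_limit:
 *
 *	if (end > mm->context.asce_limit)
 *		rc = crst_table_upgrade(mm, end);
 */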
135 
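/*
 * Toggle @bits in the atomic counter @v and return the resulting value.
 * atomic_fetch_xor() returns the old value, so XORing that once more with
 * @bits reconstructs the value after the update. E.g. if the upper byte of
 * @v is 0x01, toggling 0x02U << 24 leaves that byte at 0x03 in the value
 * returned to the caller.
 */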
136 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
137 {
138 	return atomic_fetch_xor(bits, v) ^ bits;
139 }
140 
141 #ifdef CONFIG_PGSTE
142 
143 struct page *page_table_alloc_pgste(struct mm_struct *mm)
144 {
145 	struct ptdesc *ptdesc;
146 	u64 *table;
147 
148 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
149 	if (ptdesc) {
150 		table = (u64 *)ptdesc_to_virt(ptdesc);
151 		arch_set_page_dat(virt_to_page(table), 0);
152 		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
153 		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
154 	}
155 	return ptdesc_page(ptdesc);
156 }
157 
158 void page_table_free_pgste(struct page *page)
159 {
160 	pagetable_free(page_ptdesc(page));
161 }
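
/*
 * Layout of a 4K page table with PGSTEs as set up above (per the two
 * memset64() calls in page_table_alloc_pgste()):
 *
 *	bytes 0x000 - 0x7ff: 256 pte_t entries, preset to _PAGE_INVALID
 *	bytes 0x800 - 0xfff: 256 PGSTE entries, preset to 0
 */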
162 
163 #endif /* CONFIG_PGSTE */
164 
165 /*
166  * A 2KB-pgtable is either the upper or the lower half of a normal page.
167  * The second half of the page may be unused or used as another
168  * 2KB-pgtable.
169  *
170  * Whenever possible the parent page for a new 2KB-pgtable is picked
171  * from the list of partially allocated pages mm_context_t::pgtable_list.
172  * In case the list is empty a new parent page is allocated and added to
173  * the list.
174  *
175  * When a parent page gets fully allocated it contains 2KB-pgtables in both
176  * upper and lower halves and is removed from mm_context_t::pgtable_list.
177  *
178  * When a 2KB-pgtable is freed from a fully allocated parent page, that
179  * page turns partially allocated and is added to mm_context_t::pgtable_list.
180  *
181  * If a 2KB-pgtable is freed from a partially allocated parent page, that
182  * page turns unused and gets removed from mm_context_t::pgtable_list.
183  * Furthermore, the unused parent page is released.
184  *
185  * As follows from the above, no unallocated or fully allocated parent
186  * pages are contained in mm_context_t::pgtable_list.
187  *
188  * The upper byte (bits 24-31) of the parent page _refcount is used
189  * for tracking contained 2KB-pgtables and has the following format:
190  *
191  *   PP  AA
192  * 01234567    upper byte (bits 24-31) of struct page::_refcount
193  *   ||  ||
194  *   ||  |+--- upper 2KB-pgtable is allocated
195  *   ||  +---- lower 2KB-pgtable is allocated
196  *   |+------- upper 2KB-pgtable is pending for removal
197  *   +-------- lower 2KB-pgtable is pending for removal
198  *
199  * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
200  * using _refcount is possible).
201  *
202  * When a 2KB-pgtable is allocated, the corresponding AA bit is set to 1.
203  * The parent page is either:
204  *   - added to mm_context_t::pgtable_list in case the second half of the
205  *     parent page is still unallocated;
206  *   - removed from mm_context_t::pgtable_list in case both halves of the
207  *     parent page are allocated;
208  * These operations are protected with mm_context_t::lock.
209  *
210  * When a 2KB-pgtable is deallocated, the corresponding AA bit is set to 0
211  * and the corresponding PP bit is set to 1 in a single atomic operation.
212  * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
213  * exclusive and may never be both set to 1!
214  * The parent page is either:
215  *   - added to mm_context_t::pgtable_list in case the second half of the
216  *     parent page is still allocated;
217  *   - removed from mm_context_t::pgtable_list in case the second half of
218  *     the parent page is unallocated;
219  * These operations are protected with mm_context_t::lock.
220  *
221  * It is important to understand that mm_context_t::lock only protects
222  * mm_context_t::pgtable_list and AA bits, but not the parent page itself
223  * and PP bits.
224  *
225  * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
226  * while both AA bits and the second PP bit are already unset. Then the
227  * parent page does not contain any 2KB-pgtable fragment anymore, and it has
228  * also been removed from mm_context_t::pgtable_list. It is therefore safe
229  * to release the page.
230  *
231  * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
232  * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
233  * while the PP bits are never used, nor is such a page added to or removed
234  * from mm_context_t::pgtable_list.
235  *
236  * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
237  * and prevents both 2K fragments from being reused. pte_free_defer() has to
238  * guarantee that its pgtable cannot be reused before the RCU grace period
239  * has elapsed (which page_table_free_rcu() does not actually guarantee).
240  * But for simplicity, because page->rcu_head overlays page->lru, and because
241  * the RCU callback might not be called before the mm_context_t has been freed,
242  * pte_free_defer() in this implementation prevents both fragments from being
243  * reused, and delays making the call to RCU until both fragments are freed.
244  */
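/*
 * A worked example of the upper _refcount byte for a non-PGSTE mm, going
 * through page_table_alloc() and the non-RCU page_table_free() path (a
 * sketch; each value is the byte after the corresponding atomic_xor_bits()):
 *
 *	0x00	fresh parent page, no 2K fragment handed out yet
 *	0x01	first 2K fragment allocated, page added to pgtable_list
 *	0x03	second 2K fragment allocated, page removed from the list
 *	0x12	first fragment freed: its AA bit cleared, PP bit set,
 *		page re-added to pgtable_list
 *	0x02	pending removal of the first fragment completed
 *	0x20	second fragment freed and pending, page taken off the list
 *	0x00	last pending removal completed, the parent page is released
 */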
245 unsigned long *page_table_alloc(struct mm_struct *mm)
246 {
247 	unsigned long *table;
248 	struct ptdesc *ptdesc;
249 	unsigned int mask, bit;
250 
251 	/* Try to get a fragment of a 4K page as a 2K page table */
252 	if (!mm_alloc_pgste(mm)) {
253 		table = NULL;
254 		spin_lock_bh(&mm->context.lock);
255 		if (!list_empty(&mm->context.pgtable_list)) {
256 			ptdesc = list_first_entry(&mm->context.pgtable_list,
257 						struct ptdesc, pt_list);
258 			mask = atomic_read(&ptdesc->_refcount) >> 24;
259 			/*
260 			 * The pending removal bits must also be checked.
261 			 * Failure to do so might lead to an impossible
262 			 * value (e.g. 0x13 or 0x23) being written to _refcount.
263 			 * Such values violate the assumption that pending and
264 			 * allocation bits are mutually exclusive, and the rest
265 			 * of the code goes off the rails as a result. That
266 			 * could lead to a whole bunch of races and corruptions.
267 			 */
268 			mask = (mask | (mask >> 4)) & 0x03U;
269 			if (mask != 0x03U) {
270 				table = (unsigned long *) ptdesc_to_virt(ptdesc);
271 				bit = mask & 1;		/* =1 -> second 2K */
272 				if (bit)
273 					table += PTRS_PER_PTE;
274 				atomic_xor_bits(&ptdesc->_refcount,
275 							0x01U << (bit + 24));
276 				list_del_init(&ptdesc->pt_list);
277 			}
278 		}
279 		spin_unlock_bh(&mm->context.lock);
280 		if (table)
281 			return table;
282 	}
283 	/* Allocate a fresh page */
284 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
285 	if (!ptdesc)
286 		return NULL;
287 	if (!pagetable_pte_ctor(ptdesc)) {
288 		pagetable_free(ptdesc);
289 		return NULL;
290 	}
291 	arch_set_page_dat(ptdesc_page(ptdesc), 0);
292 	/* Initialize page table */
293 	table = (unsigned long *) ptdesc_to_virt(ptdesc);
294 	if (mm_alloc_pgste(mm)) {
295 		/* Return 4K page table with PGSTEs */
296 		INIT_LIST_HEAD(&ptdesc->pt_list);
297 		atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
298 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
299 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
300 	} else {
301 		/* Return the first 2K fragment of the page */
302 		atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
303 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
304 		spin_lock_bh(&mm->context.lock);
305 		list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
306 		spin_unlock_bh(&mm->context.lock);
307 	}
308 	return table;
309 }
310 
311 static void page_table_release_check(struct page *page, void *table,
312 				     unsigned int half, unsigned int mask)
313 {
314 	char msg[128];
315 
316 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
317 		return;
318 	if (!mask && list_empty(&page->lru))
319 		return;
320 	snprintf(msg, sizeof(msg),
321 		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
322 		 table, half, mask);
323 	dump_page(page, msg);
324 }
325 
326 static void pte_free_now(struct rcu_head *head)
327 {
328 	struct ptdesc *ptdesc;
329 
330 	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
331 	pagetable_pte_dtor(ptdesc);
332 	pagetable_free(ptdesc);
333 }
334 
335 void page_table_free(struct mm_struct *mm, unsigned long *table)
336 {
337 	unsigned int mask, bit, half;
338 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
339 
340 	if (!mm_alloc_pgste(mm)) {
341 		/* Free 2K page table fragment of a 4K page */
342 		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
343 		spin_lock_bh(&mm->context.lock);
344 		/*
345 		 * Mark the page for delayed release. The actual release
346 		 * will happen outside of the critical section from this
347 		 * function or from __tlb_remove_table().
348 		 */
349 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
350 		mask >>= 24;
351 		if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
352 			/*
353 			 * Other half is allocated, and neither half has had
354 			 * its free deferred: add page to head of list, to make
355 			 * this freed half available for immediate reuse.
356 			 */
357 			list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
358 		} else {
359 			/* If page is on list, now remove it. */
360 			list_del_init(&ptdesc->pt_list);
361 		}
362 		spin_unlock_bh(&mm->context.lock);
363 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
364 		mask >>= 24;
365 		if (mask != 0x00U)
366 			return;
367 		half = 0x01U << bit;
368 	} else {
369 		half = 0x03U;
370 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
371 		mask >>= 24;
372 	}
373 
374 	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
375 	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
376 		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
377 	else
378 		pte_free_now(&ptdesc->pt_rcu_head);
379 }
380 
381 void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
382 			 unsigned long vmaddr)
383 {
384 	struct mm_struct *mm;
385 	unsigned int bit, mask;
386 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
387 
388 	mm = tlb->mm;
389 	if (mm_alloc_pgste(mm)) {
390 		gmap_unlink(mm, table, vmaddr);
391 		table = (unsigned long *) ((unsigned long)table | 0x03U);
392 		tlb_remove_ptdesc(tlb, table);
393 		return;
394 	}
395 	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
396 	spin_lock_bh(&mm->context.lock);
397 	/*
398 	 * Mark the page for delayed release. The actual release will happen
399 	 * outside of the critical section from __tlb_remove_table() or from
400 	 * page_table_free().
401 	 */
402 	mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
403 	mask >>= 24;
404 	if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
405 		/*
406 		 * Other half is allocated, and neither half has had
407 		 * its free deferred: add page to end of list, to make
408 		 * this freed half available for reuse once its pending
409 		 * bit has been cleared by __tlb_remove_table().
410 		 */
411 		list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
412 	} else {
413 		/* If page is on list, now remove it. */
414 		list_del_init(&ptdesc->pt_list);
415 	}
416 	spin_unlock_bh(&mm->context.lock);
417 	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
418 	tlb_remove_table(tlb, table);
419 }
420 
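/*
 * The two low bits of the pointer passed to __tlb_remove_table() encode
 * what is being released (matching the tags applied in page_table_free_rcu()
 * above; CRST tables arrive untagged from the pmd/pud/p4d free paths):
 *
 *	0x00 - a CRST table (pmd, pud or p4d), freed directly
 *	0x01 - the lower 2K half of a 4K page table
 *	0x02 - the higher 2K half of a 4K page table
 *	0x03 - a full 4K page table with PGSTEs
 */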
421 void __tlb_remove_table(void *_table)
422 {
423 	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
424 	void *table = (void *)((unsigned long) _table ^ mask);
425 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
426 
427 	switch (half) {
428 	case 0x00U:	/* pmd, pud, or p4d */
429 		pagetable_free(ptdesc);
430 		return;
431 	case 0x01U:	/* lower 2K of a 4K page table */
432 	case 0x02U:	/* higher 2K of a 4K page table */
433 		mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
434 		mask >>= 24;
435 		if (mask != 0x00U)
436 			return;
437 		break;
438 	case 0x03U:	/* 4K page table with pgstes */
439 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
440 		mask >>= 24;
441 		break;
442 	}
443 
444 	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
445 	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
446 		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
447 	else
448 		pte_free_now(&ptdesc->pt_rcu_head);
449 }
450 
451 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
452 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
453 {
454 	struct page *page;
455 
456 	page = virt_to_page(pgtable);
457 	SetPageActive(page);
458 	page_table_free(mm, (unsigned long *)pgtable);
459 	/*
460 	 * page_table_free() does not do the pgste gmap_unlink() which
461 	 * page_table_free_rcu() does: warn us if pgste ever reaches here.
462 	 */
463 	WARN_ON_ONCE(mm_has_pgste(mm));
464 }
465 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
466 
467 /*
468  * Base infrastructure required to generate basic asces, region, segment,
469  * and page tables that do not make use of enhanced features like EDAT1.
470  */
471 
472 static struct kmem_cache *base_pgt_cache;
473 
474 static unsigned long *base_pgt_alloc(void)
475 {
476 	unsigned long *table;
477 
478 	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
479 	if (table)
480 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
481 	return table;
482 }
483 
484 static void base_pgt_free(unsigned long *table)
485 {
486 	kmem_cache_free(base_pgt_cache, table);
487 }
488 
489 static unsigned long *base_crst_alloc(unsigned long val)
490 {
491 	unsigned long *table;
492 	struct ptdesc *ptdesc;
493 
494 	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
495 	if (!ptdesc)
496 		return NULL;
497 	table = ptdesc_address(ptdesc);
498 
499 	crst_table_init(table, val);
500 	return table;
501 }
502 
503 static void base_crst_free(unsigned long *table)
504 {
505 	if (!table)
506 		return;
507 	pagetable_free(virt_to_ptdesc(table));
508 }
509 
510 #define BASE_ADDR_END_FUNC(NAME, SIZE)					\
511 static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
512 						   unsigned long end)	\
513 {									\
514 	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
515 									\
516 	return (next - 1) < (end - 1) ? next : end;			\
517 }
518 
519 BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
520 BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
521 BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
522 BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
523 BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
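
/*
 * The macro above generates base_page_addr_end(), base_segment_addr_end()
 * and friends: round addr up to the next SIZE boundary without going past
 * end. A worked example, with _SEGMENT_SIZE being 1 MB:
 *
 *	base_segment_addr_end(0x100000, 0x500000) == 0x200000
 *	base_segment_addr_end(0x4f0000, 0x500000) == 0x500000
 */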
524 
525 static inline unsigned long base_lra(unsigned long address)
526 {
527 	unsigned long real;
528 
529 	asm volatile(
530 		"	lra	%0,0(%1)\n"
531 		: "=d" (real) : "a" (address) : "cc");
532 	return real;
533 }
534 
535 static int base_page_walk(unsigned long *origin, unsigned long addr,
536 			  unsigned long end, int alloc)
537 {
538 	unsigned long *pte, next;
539 
540 	if (!alloc)
541 		return 0;
542 	pte = origin;
543 	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
544 	do {
545 		next = base_page_addr_end(addr, end);
546 		*pte = base_lra(addr);
547 	} while (pte++, addr = next, addr < end);
548 	return 0;
549 }
550 
551 static int base_segment_walk(unsigned long *origin, unsigned long addr,
552 			     unsigned long end, int alloc)
553 {
554 	unsigned long *ste, next, *table;
555 	int rc;
556 
557 	ste = origin;
558 	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
559 	do {
560 		next = base_segment_addr_end(addr, end);
561 		if (*ste & _SEGMENT_ENTRY_INVALID) {
562 			if (!alloc)
563 				continue;
564 			table = base_pgt_alloc();
565 			if (!table)
566 				return -ENOMEM;
567 			*ste = __pa(table) | _SEGMENT_ENTRY;
568 		}
569 		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
570 		rc = base_page_walk(table, addr, next, alloc);
571 		if (rc)
572 			return rc;
573 		if (!alloc)
574 			base_pgt_free(table);
575 		cond_resched();
576 	} while (ste++, addr = next, addr < end);
577 	return 0;
578 }
579 
580 static int base_region3_walk(unsigned long *origin, unsigned long addr,
581 			     unsigned long end, int alloc)
582 {
583 	unsigned long *rtte, next, *table;
584 	int rc;
585 
586 	rtte = origin;
587 	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
588 	do {
589 		next = base_region3_addr_end(addr, end);
590 		if (*rtte & _REGION_ENTRY_INVALID) {
591 			if (!alloc)
592 				continue;
593 			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
594 			if (!table)
595 				return -ENOMEM;
596 			*rtte = __pa(table) | _REGION3_ENTRY;
597 		}
598 		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
599 		rc = base_segment_walk(table, addr, next, alloc);
600 		if (rc)
601 			return rc;
602 		if (!alloc)
603 			base_crst_free(table);
604 	} while (rtte++, addr = next, addr < end);
605 	return 0;
606 }
607 
608 static int base_region2_walk(unsigned long *origin, unsigned long addr,
609 			     unsigned long end, int alloc)
610 {
611 	unsigned long *rste, next, *table;
612 	int rc;
613 
614 	rste = origin;
615 	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
616 	do {
617 		next = base_region2_addr_end(addr, end);
618 		if (*rste & _REGION_ENTRY_INVALID) {
619 			if (!alloc)
620 				continue;
621 			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
622 			if (!table)
623 				return -ENOMEM;
624 			*rste = __pa(table) | _REGION2_ENTRY;
625 		}
626 		table = __va(*rste & _REGION_ENTRY_ORIGIN);
627 		rc = base_region3_walk(table, addr, next, alloc);
628 		if (rc)
629 			return rc;
630 		if (!alloc)
631 			base_crst_free(table);
632 	} while (rste++, addr = next, addr < end);
633 	return 0;
634 }
635 
636 static int base_region1_walk(unsigned long *origin, unsigned long addr,
637 			     unsigned long end, int alloc)
638 {
639 	unsigned long *rfte, next, *table;
640 	int rc;
641 
642 	rfte = origin;
643 	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
644 	do {
645 		next = base_region1_addr_end(addr, end);
646 		if (*rfte & _REGION_ENTRY_INVALID) {
647 			if (!alloc)
648 				continue;
649 			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
650 			if (!table)
651 				return -ENOMEM;
652 			*rfte = __pa(table) | _REGION1_ENTRY;
653 		}
654 		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
655 		rc = base_region2_walk(table, addr, next, alloc);
656 		if (rc)
657 			return rc;
658 		if (!alloc)
659 			base_crst_free(table);
660 	} while (rfte++, addr = next, addr < end);
661 	return 0;
662 }
663 
664 /**
665  * base_asce_free - free asce and tables returned from base_asce_alloc()
666  * @asce: asce to be freed
667  *
668  * Frees all region, segment, and page tables that were allocated with a
669  * corresponding base_asce_alloc() call.
670  */
671 void base_asce_free(unsigned long asce)
672 {
673 	unsigned long *table = __va(asce & _ASCE_ORIGIN);
674 
675 	if (!asce)
676 		return;
677 	switch (asce & _ASCE_TYPE_MASK) {
678 	case _ASCE_TYPE_SEGMENT:
679 		base_segment_walk(table, 0, _REGION3_SIZE, 0);
680 		break;
681 	case _ASCE_TYPE_REGION3:
682 		base_region3_walk(table, 0, _REGION2_SIZE, 0);
683 		break;
684 	case _ASCE_TYPE_REGION2:
685 		base_region2_walk(table, 0, _REGION1_SIZE, 0);
686 		break;
687 	case _ASCE_TYPE_REGION1:
688 		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
689 		break;
690 	}
691 	base_crst_free(table);
692 }
693 
694 static int base_pgt_cache_init(void)
695 {
696 	static DEFINE_MUTEX(base_pgt_cache_mutex);
697 	unsigned long sz = _PAGE_TABLE_SIZE;
698 
699 	if (base_pgt_cache)
700 		return 0;
701 	mutex_lock(&base_pgt_cache_mutex);
702 	if (!base_pgt_cache)
703 		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
704 	mutex_unlock(&base_pgt_cache_mutex);
705 	return base_pgt_cache ? 0 : -ENOMEM;
706 }
707 
708 /**
709  * base_asce_alloc - create kernel mapping without enhanced DAT features
710  * @addr: virtual start address of kernel mapping
711  * @num_pages: number of consecutive pages
712  *
713  * Generate an asce, including all required region, segment and page tables,
714  * that can be used to access the virtual kernel mapping. The difference is
715  * that the returned asce does not make use of any enhanced DAT features like
716  * e.g. large pages. This is required for some I/O functions that pass an
717  * asce, like e.g. some service call requests.
718  *
719  * Note: the returned asce may NEVER be attached to any cpu. It may only be
720  *	 used for I/O requests. TLB entries that might result because the
721  *	 asce was attached to a cpu won't be cleared.
722  */
723 unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
724 {
725 	unsigned long asce, *table, end;
726 	int rc;
727 
728 	if (base_pgt_cache_init())
729 		return 0;
730 	end = addr + num_pages * PAGE_SIZE;
731 	if (end <= _REGION3_SIZE) {
732 		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
733 		if (!table)
734 			return 0;
735 		rc = base_segment_walk(table, addr, end, 1);
736 		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
737 	} else if (end <= _REGION2_SIZE) {
738 		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
739 		if (!table)
740 			return 0;
741 		rc = base_region3_walk(table, addr, end, 1);
742 		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
743 	} else if (end <= _REGION1_SIZE) {
744 		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
745 		if (!table)
746 			return 0;
747 		rc = base_region2_walk(table, addr, end, 1);
748 		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
749 	} else {
750 		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
751 		if (!table)
752 			return 0;
753 		rc = base_region1_walk(table, addr, end, 1);
754 		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
755 	}
756 	if (rc) {
757 		base_asce_free(asce);
758 		asce = 0;
759 	}
760 	return asce;
761 }
762
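/*
 * A minimal usage sketch for the base asce interface (hypothetical caller,
 * error handling trimmed): build an asce covering a kernel buffer, pass it
 * to an interface that requires an asce without enhanced DAT features, then
 * free it again:
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... hand asce to the service call / I/O request ...
 *	base_asce_free(asce);
 */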