xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 18afb028)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Page table allocation functions
4  *
5  *    Copyright IBM Corp. 2016
6  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  */
8 
9 #include <linux/sysctl.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <asm/mmu_context.h>
13 #include <asm/pgalloc.h>
14 #include <asm/gmap.h>
15 #include <asm/tlb.h>
16 #include <asm/tlbflush.h>
17 
18 #ifdef CONFIG_PGSTE
19 
20 int page_table_allocate_pgste = 0;
21 EXPORT_SYMBOL(page_table_allocate_pgste);
22 
23 static struct ctl_table page_table_sysctl[] = {
24 	{
25 		.procname	= "allocate_pgste",
26 		.data		= &page_table_allocate_pgste,
27 		.maxlen		= sizeof(int),
28 		.mode		= S_IRUGO | S_IWUSR,
29 		.proc_handler	= proc_dointvec_minmax,
30 		.extra1		= SYSCTL_ZERO,
31 		.extra2		= SYSCTL_ONE,
32 	},
33 	{ }
34 };
35 
36 static int __init page_table_register_sysctl(void)
37 {
38 	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
39 }
40 __initcall(page_table_register_sysctl);
41 
42 #endif /* CONFIG_PGSTE */
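
/*
 * Usage sketch: the sysctl registered above appears as
 * /proc/sys/vm/allocate_pgste, so PGSTE page tables for newly created
 * processes can be enabled with
 *
 *	sysctl vm.allocate_pgste=1
 *
 * or an equivalent write to the procfs file. The flag is then consumed,
 * via the mm context, by the mm_alloc_pgste() checks in the allocation
 * paths below.
 */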
43 
44 unsigned long *crst_table_alloc(struct mm_struct *mm)
45 {
46 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
47 
48 	if (!ptdesc)
49 		return NULL;
50 	arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
51 	return (unsigned long *) ptdesc_to_virt(ptdesc);
52 }
53 
54 void crst_table_free(struct mm_struct *mm, unsigned long *table)
55 {
56 	pagetable_free(virt_to_ptdesc(table));
57 }
58 
59 static void __crst_table_upgrade(void *arg)
60 {
61 	struct mm_struct *mm = arg;
62 
63 	/* change all active ASCEs to avoid the creation of new TLB entries */
64 	if (current->active_mm == mm) {
65 		S390_lowcore.user_asce = mm->context.asce;
66 		__ctl_load(S390_lowcore.user_asce, 7, 7);
67 	}
68 	__tlb_flush_local();
69 }
70 
71 int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
72 {
73 	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
74 	unsigned long asce_limit = mm->context.asce_limit;
75 
76 	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
77 	VM_BUG_ON(asce_limit < _REGION2_SIZE);
78 
79 	if (end <= asce_limit)
80 		return 0;
81 
82 	if (asce_limit == _REGION2_SIZE) {
83 		p4d = crst_table_alloc(mm);
84 		if (unlikely(!p4d))
85 			goto err_p4d;
86 		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
87 	}
88 	if (end > _REGION1_SIZE) {
89 		pgd = crst_table_alloc(mm);
90 		if (unlikely(!pgd))
91 			goto err_pgd;
92 		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
93 	}
94 
95 	spin_lock_bh(&mm->page_table_lock);
96 
97 	/*
98 	 * This routine gets called with mmap_lock held and there is
99 	 * no reason to optimize for the case when it is not. However, if
100 	 * that ever changes, the check below will let us know.
101 	 */
102 	VM_BUG_ON(asce_limit != mm->context.asce_limit);
103 
104 	if (p4d) {
105 		__pgd = (unsigned long *) mm->pgd;
106 		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
107 		mm->pgd = (pgd_t *) p4d;
108 		mm->context.asce_limit = _REGION1_SIZE;
109 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
110 			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
111 		mm_inc_nr_puds(mm);
112 	}
113 	if (pgd) {
114 		__pgd = (unsigned long *) mm->pgd;
115 		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
116 		mm->pgd = (pgd_t *) pgd;
117 		mm->context.asce_limit = TASK_SIZE_MAX;
118 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
119 			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
120 	}
121 
122 	spin_unlock_bh(&mm->page_table_lock);
123 
124 	on_each_cpu(__crst_table_upgrade, mm, 0);
125 
126 	return 0;
127 
128 err_pgd:
129 	crst_table_free(mm, p4d);
130 err_p4d:
131 	return -ENOMEM;
132 }
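
/*
 * For reference, the upgrade above moves the address space through the
 * following states (a sketch; the sizes are the usual s390 region sizes):
 *
 *	asce_limit	top-level table		levels
 *	_REGION2_SIZE	region-third table	3 (initial layout)
 *	_REGION1_SIZE	region-second table	4 (_ASCE_TYPE_REGION2)
 *	TASK_SIZE_MAX	region-first table	5 (_ASCE_TYPE_REGION1)
 *
 * A 3->5 upgrade allocates both new tables first and only then, under
 * mm->page_table_lock, chains them on top of the old pgd before the new
 * asce is propagated to all CPUs by __crst_table_upgrade().
 */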
133 
134 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
135 {
136 	return atomic_fetch_xor(bits, v) ^ bits;
137 }
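
/*
 * A small example of the helper above: unlike atomic_fetch_xor(), which
 * returns the old value, atomic_xor_bits() returns the value *after* the
 * xor. If the upper byte of *v is 0x01 (lower 2K fragment allocated) and
 * bits is 0x02U << 24, the returned value has 0x03 in its upper byte.
 */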
138 
139 #ifdef CONFIG_PGSTE
140 
141 struct page *page_table_alloc_pgste(struct mm_struct *mm)
142 {
143 	struct ptdesc *ptdesc;
144 	u64 *table;
145 
146 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
147 	if (ptdesc) {
148 		table = (u64 *)ptdesc_to_virt(ptdesc);
149 		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
150 		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
151 	}
152 	return ptdesc_page(ptdesc);
153 }
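
/*
 * Layout of the page returned by page_table_alloc_pgste() (one full 4K
 * page, never split into 2K fragments):
 *
 *	offset 0x000: 256 PTEs, initialized to _PAGE_INVALID
 *	offset 0x800: 256 PGSTEs (per-PTE guest status, used by KVM), zeroed
 */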
154 
155 void page_table_free_pgste(struct page *page)
156 {
157 	pagetable_free(page_ptdesc(page));
158 }
159 
160 #endif /* CONFIG_PGSTE */
161 
162 /*
163  * A 2KB-pgtable is either the upper or the lower half of a normal page.
164  * The other half of the page may be unused or may hold another
165  * 2KB-pgtable.
166  *
167  * Whenever possible the parent page for a new 2KB-pgtable is picked
168  * from the list of partially allocated pages mm_context_t::pgtable_list.
169  * In case the list is empty a new parent page is allocated and added to
170  * the list.
171  *
172  * When a parent page gets fully allocated it contains 2KB-pgtables in both
173  * upper and lower halves and is removed from mm_context_t::pgtable_list.
174  *
175  * When a 2KB-pgtable is freed from a fully allocated parent page, that
176  * page becomes partially allocated and is added to mm_context_t::pgtable_list.
177  *
178  * If a 2KB-pgtable is freed from a partially allocated parent page, that
179  * page becomes unused and is removed from mm_context_t::pgtable_list.
180  * The unused parent page is then released.
181  *
182  * It follows from the above that no unallocated or fully allocated parent
183  * pages are ever contained in mm_context_t::pgtable_list.
184  *
185  * The upper byte (bits 24-31) of the parent page _refcount is used
186  * for tracking contained 2KB-pgtables and has the following format:
187  *
188  *   PP  AA
189  * 01234567    upper byte (bits 24-31) of struct page::_refcount
190  *   ||  ||
191  *   ||  |+--- lower 2KB-pgtable is allocated
192  *   ||  +---- upper 2KB-pgtable is allocated
193  *   |+------- lower 2KB-pgtable is pending for removal
194  *   +-------- upper 2KB-pgtable is pending for removal
195  *
196  * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
197  * using _refcount is possible).
198  *
199  * When a 2KB-pgtable is allocated, the corresponding AA bit is set to 1.
200  * The parent page is either:
201  *   - added to mm_context_t::pgtable_list in case the second half of the
202  *     parent page is still unallocated;
203  *   - removed from mm_context_t::pgtable_list in case both halves of the
204  *     parent page are allocated;
205  * These operations are protected with mm_context_t::lock.
206  *
207  * When a 2KB-pgtable is deallocated, the corresponding AA bit is set to 0
208  * and the corresponding PP bit is set to 1 in a single atomic operation.
209  * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
210  * exclusive and may never be both set to 1!
211  * The parent page is either:
212  *   - added to mm_context_t::pgtable_list in case the second half of the
213  *     parent page is still allocated;
214  *   - removed from mm_context_t::pgtable_list in case the second half of
215  *     the parent page is unallocated;
216  * These operations are protected with mm_context_t::lock.
217  *
218  * It is important to understand that mm_context_t::lock only protects
219  * mm_context_t::pgtable_list and AA bits, but not the parent page itself
220  * and PP bits.
221  *
222  * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
223  * while both AA bits and the second PP bit are already unset. Then the
224  * parent page does not contain any 2KB-pgtable fragment anymore, and it has
225  * also been removed from mm_context_t::pgtable_list. It is therefore safe
226  * to release the page.
227  *
228  * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
229  * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
230  * while the PP bits are never used, nor is such a page added to or removed
231  * from mm_context_t::pgtable_list.
232  *
233  * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
234  * and prevents both 2K fragments from being reused. pte_free_defer() has to
235  * guarantee that its pgtable cannot be reused before the RCU grace period
236  * has elapsed (which page_table_free_rcu() does not actually guarantee).
237  * But for simplicity, because page->rcu_head overlays page->lru, and because
238  * the RCU callback might not be called before the mm_context_t has been freed,
239  * pte_free_defer() in this implementation prevents both fragments from being
240  * reused, and delays making the call to RCU until both fragments are freed.
241  */
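
/*
 * A worked example of the scheme described above, following the upper
 * _refcount byte of one parent page whose two 2K halves are allocated and
 * later freed through page_table_free():
 *
 *	0x00  fresh page from pagetable_alloc()
 *	0x01  lower 2K handed out, page put on pgtable_list
 *	0x03  upper 2K handed out too, page taken off pgtable_list
 *	0x12  lower 2K freed: its AA bit cleared, its PP bit set (pending)
 *	0x02  lower PP bit cleared outside the lock, page back on the list
 *	0x20  upper 2K freed: AA bit cleared, PP bit set, page off the list
 *	0x00  last PP bit cleared, no fragments left, the page is released
 */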
242 unsigned long *page_table_alloc(struct mm_struct *mm)
243 {
244 	unsigned long *table;
245 	struct ptdesc *ptdesc;
246 	unsigned int mask, bit;
247 
248 	/* Try to get a fragment of a 4K page as a 2K page table */
249 	if (!mm_alloc_pgste(mm)) {
250 		table = NULL;
251 		spin_lock_bh(&mm->context.lock);
252 		if (!list_empty(&mm->context.pgtable_list)) {
253 			ptdesc = list_first_entry(&mm->context.pgtable_list,
254 						struct ptdesc, pt_list);
255 			mask = atomic_read(&ptdesc->_refcount) >> 24;
256 			/*
257 			 * The pending removal bits must also be checked.
258 			 * Failure to do so might lead to an impossible
259 			 * value (e.g. 0x13 or 0x23) being written to _refcount.
260 			 * Such values violate the assumption that the pending and
261 			 * allocation bits are mutually exclusive, and the rest
262 			 * of the code derails as a result. That could lead to
263 			 * a whole bunch of races and corruptions.
264 			 */
265 			mask = (mask | (mask >> 4)) & 0x03U;
266 			if (mask != 0x03U) {
267 				table = (unsigned long *) ptdesc_to_virt(ptdesc);
268 				bit = mask & 1;		/* =1 -> second 2K */
269 				if (bit)
270 					table += PTRS_PER_PTE;
271 				atomic_xor_bits(&ptdesc->_refcount,
272 							0x01U << (bit + 24));
273 				list_del_init(&ptdesc->pt_list);
274 			}
275 		}
276 		spin_unlock_bh(&mm->context.lock);
277 		if (table)
278 			return table;
279 	}
280 	/* Allocate a fresh page */
281 	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
282 	if (!ptdesc)
283 		return NULL;
284 	if (!pagetable_pte_ctor(ptdesc)) {
285 		pagetable_free(ptdesc);
286 		return NULL;
287 	}
288 	arch_set_page_dat(ptdesc_page(ptdesc), 0);
289 	/* Initialize page table */
290 	table = (unsigned long *) ptdesc_to_virt(ptdesc);
291 	if (mm_alloc_pgste(mm)) {
292 		/* Return 4K page table with PGSTEs */
293 		INIT_LIST_HEAD(&ptdesc->pt_list);
294 		atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
295 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
296 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
297 	} else {
298 		/* Return the first 2K fragment of the page */
299 		atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
300 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
301 		spin_lock_bh(&mm->context.lock);
302 		list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
303 		spin_unlock_bh(&mm->context.lock);
304 	}
305 	return table;
306 }
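
/*
 * Example of the pending-bit check in page_table_alloc() above: with the
 * upper _refcount byte at 0x21 (lower 2K allocated, upper 2K pending
 * removal, as happens after page_table_free_rcu() but before
 * __tlb_remove_table() has run), the check computes
 * (0x21 | (0x21 >> 4)) & 0x03 = 0x03, so the page is treated as busy and
 * a fresh page is allocated instead, even though the upper AA bit itself
 * is already clear.
 */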
307 
308 static void page_table_release_check(struct page *page, void *table,
309 				     unsigned int half, unsigned int mask)
310 {
311 	char msg[128];
312 
313 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
314 		return;
315 	if (!mask && list_empty(&page->lru))
316 		return;
317 	snprintf(msg, sizeof(msg),
318 		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
319 		 table, half, mask);
320 	dump_page(page, msg);
321 }
322 
323 static void pte_free_now(struct rcu_head *head)
324 {
325 	struct ptdesc *ptdesc;
326 
327 	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
328 	pagetable_pte_dtor(ptdesc);
329 	pagetable_free(ptdesc);
330 }
331 
332 void page_table_free(struct mm_struct *mm, unsigned long *table)
333 {
334 	unsigned int mask, bit, half;
335 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
336 
337 	if (!mm_alloc_pgste(mm)) {
338 		/* Free 2K page table fragment of a 4K page */
339 		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
340 		spin_lock_bh(&mm->context.lock);
341 		/*
342 		 * Mark the page for delayed release. The actual release
343 		 * will happen outside of the critical section from this
344 		 * function or from __tlb_remove_table().
345 		 */
346 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
347 		mask >>= 24;
348 		if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
349 			/*
350 			 * Other half is allocated, and neither half has had
351 			 * its free deferred: add page to head of list, to make
352 			 * this freed half available for immediate reuse.
353 			 */
354 			list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
355 		} else {
356 			/* If page is on list, now remove it. */
357 			list_del_init(&ptdesc->pt_list);
358 		}
359 		spin_unlock_bh(&mm->context.lock);
360 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
361 		mask >>= 24;
362 		if (mask != 0x00U)
363 			return;
364 		half = 0x01U << bit;
365 	} else {
366 		half = 0x03U;
367 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
368 		mask >>= 24;
369 	}
370 
371 	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
372 	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
373 		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
374 	else
375 		pte_free_now(&ptdesc->pt_rcu_head);
376 }
377 
378 void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
379 			 unsigned long vmaddr)
380 {
381 	struct mm_struct *mm;
382 	unsigned int bit, mask;
383 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
384 
385 	mm = tlb->mm;
386 	if (mm_alloc_pgste(mm)) {
387 		gmap_unlink(mm, table, vmaddr);
388 		table = (unsigned long *) ((unsigned long)table | 0x03U);
389 		tlb_remove_ptdesc(tlb, table);
390 		return;
391 	}
392 	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
393 	spin_lock_bh(&mm->context.lock);
394 	/*
395 	 * Mark the page for delayed release. The actual release will happen
396 	 * outside of the critical section from __tlb_remove_table() or from
397 	 * page_table_free().
398 	 */
399 	mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
400 	mask >>= 24;
401 	if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
402 		/*
403 		 * Other half is allocated, and neither half has had
404 		 * its free deferred: add page to end of list, to make
405 		 * this freed half available for reuse once its pending
406 		 * bit has been cleared by __tlb_remove_table().
407 		 */
408 		list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
409 	} else {
410 		/* If page is on list, now remove it. */
411 		list_del_init(&ptdesc->pt_list);
412 	}
413 	spin_unlock_bh(&mm->context.lock);
414 	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
415 	tlb_remove_table(tlb, table);
416 }
417 
418 void __tlb_remove_table(void *_table)
419 {
420 	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
421 	void *table = (void *)((unsigned long) _table ^ mask);
422 	struct ptdesc *ptdesc = virt_to_ptdesc(table);
423 
424 	switch (half) {
425 	case 0x00U:	/* pmd, pud, or p4d */
426 		pagetable_free(ptdesc);
427 		return;
428 	case 0x01U:	/* lower 2K of a 4K page table */
429 	case 0x02U:	/* upper 2K of a 4K page table */
430 		mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
431 		mask >>= 24;
432 		if (mask != 0x00U)
433 			return;
434 		break;
435 	case 0x03U:	/* 4K page table with pgstes */
436 		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
437 		mask >>= 24;
438 		break;
439 	}
440 
441 	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
442 	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
443 		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
444 	else
445 		pte_free_now(&ptdesc->pt_rcu_head);
446 }
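
/*
 * A sketch of how the tag in the low bits of _table is produced: page
 * tables are at least 2K aligned, so the two lowest address bits are free
 * to carry what is being released. page_table_free_rcu() tags the pointer
 * before handing it to tlb_remove_table(), e.g. for the upper fragment:
 *
 *	table = (unsigned long *)((unsigned long)table | (0x01U << bit));
 *
 * and __tlb_remove_table() recovers both pieces with
 *
 *	mask  = (unsigned long)_table & 0x03U;
 *	table = (void *)((unsigned long)_table ^ mask);
 *
 * 0x00 marks a CRST table, 0x01/0x02 a 2K fragment, 0x03 a pgste table.
 */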
447 
448 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
449 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
450 {
451 	struct page *page;
452 
453 	page = virt_to_page(pgtable);
454 	SetPageActive(page);
455 	page_table_free(mm, (unsigned long *)pgtable);
456 	/*
457 	 * page_table_free() does not do the pgste gmap_unlink() which
458 	 * page_table_free_rcu() does: warn us if pgste ever reaches here.
459 	 */
460 	WARN_ON_ONCE(mm_has_pgste(mm));
461 }
462 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
463 
464 /*
465  * Base infrastructure required to generate basic asces, region, segment,
466  * and page tables that do not make use of enhanced features like EDAT1.
467  */
468 
469 static struct kmem_cache *base_pgt_cache;
470 
471 static unsigned long *base_pgt_alloc(void)
472 {
473 	unsigned long *table;
474 
475 	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
476 	if (table)
477 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
478 	return table;
479 }
480 
481 static void base_pgt_free(unsigned long *table)
482 {
483 	kmem_cache_free(base_pgt_cache, table);
484 }
485 
486 static unsigned long *base_crst_alloc(unsigned long val)
487 {
488 	unsigned long *table;
489 	struct ptdesc *ptdesc;
490 
491 	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
492 	if (!ptdesc)
493 		return NULL;
494 	table = ptdesc_address(ptdesc);
495 
496 	crst_table_init(table, val);
497 	return table;
498 }
499 
500 static void base_crst_free(unsigned long *table)
501 {
502 	pagetable_free(virt_to_ptdesc(table));
503 }
504 
505 #define BASE_ADDR_END_FUNC(NAME, SIZE)					\
506 static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
507 						   unsigned long end)	\
508 {									\
509 	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
510 									\
511 	return (next - 1) < (end - 1) ? next : end;			\
512 }
513 
514 BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
515 BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
516 BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
517 BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
518 BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
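
/*
 * For illustration, BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE) above
 * expands to:
 *
 *	static inline unsigned long base_segment_addr_end(unsigned long addr,
 *							  unsigned long end)
 *	{
 *		unsigned long next = (addr + (_SEGMENT_SIZE)) & ~((_SEGMENT_SIZE) - 1);
 *
 *		return (next - 1) < (end - 1) ? next : end;
 *	}
 *
 * i.e. round addr up to the next segment boundary but never go past end.
 * Comparing next - 1 with end - 1 keeps the result correct even when next
 * or end wraps to 0 at the very top of the address space.
 */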
519 
520 static inline unsigned long base_lra(unsigned long address)
521 {
522 	unsigned long real;
523 
524 	asm volatile(
525 		"	lra	%0,0(%1)\n"
526 		: "=d" (real) : "a" (address) : "cc");
527 	return real;
528 }
529 
530 static int base_page_walk(unsigned long *origin, unsigned long addr,
531 			  unsigned long end, int alloc)
532 {
533 	unsigned long *pte, next;
534 
535 	if (!alloc)
536 		return 0;
537 	pte = origin;
538 	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
539 	do {
540 		next = base_page_addr_end(addr, end);
541 		*pte = base_lra(addr);
542 	} while (pte++, addr = next, addr < end);
543 	return 0;
544 }
545 
546 static int base_segment_walk(unsigned long *origin, unsigned long addr,
547 			     unsigned long end, int alloc)
548 {
549 	unsigned long *ste, next, *table;
550 	int rc;
551 
552 	ste = origin;
553 	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
554 	do {
555 		next = base_segment_addr_end(addr, end);
556 		if (*ste & _SEGMENT_ENTRY_INVALID) {
557 			if (!alloc)
558 				continue;
559 			table = base_pgt_alloc();
560 			if (!table)
561 				return -ENOMEM;
562 			*ste = __pa(table) | _SEGMENT_ENTRY;
563 		}
564 		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
565 		rc = base_page_walk(table, addr, next, alloc);
566 		if (rc)
567 			return rc;
568 		if (!alloc)
569 			base_pgt_free(table);
570 		cond_resched();
571 	} while (ste++, addr = next, addr < end);
572 	return 0;
573 }
574 
575 static int base_region3_walk(unsigned long *origin, unsigned long addr,
576 			     unsigned long end, int alloc)
577 {
578 	unsigned long *rtte, next, *table;
579 	int rc;
580 
581 	rtte = origin;
582 	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
583 	do {
584 		next = base_region3_addr_end(addr, end);
585 		if (*rtte & _REGION_ENTRY_INVALID) {
586 			if (!alloc)
587 				continue;
588 			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
589 			if (!table)
590 				return -ENOMEM;
591 			*rtte = __pa(table) | _REGION3_ENTRY;
592 		}
593 		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
594 		rc = base_segment_walk(table, addr, next, alloc);
595 		if (rc)
596 			return rc;
597 		if (!alloc)
598 			base_crst_free(table);
599 	} while (rtte++, addr = next, addr < end);
600 	return 0;
601 }
602 
603 static int base_region2_walk(unsigned long *origin, unsigned long addr,
604 			     unsigned long end, int alloc)
605 {
606 	unsigned long *rste, next, *table;
607 	int rc;
608 
609 	rste = origin;
610 	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
611 	do {
612 		next = base_region2_addr_end(addr, end);
613 		if (*rste & _REGION_ENTRY_INVALID) {
614 			if (!alloc)
615 				continue;
616 			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
617 			if (!table)
618 				return -ENOMEM;
619 			*rste = __pa(table) | _REGION2_ENTRY;
620 		}
621 		table = __va(*rste & _REGION_ENTRY_ORIGIN);
622 		rc = base_region3_walk(table, addr, next, alloc);
623 		if (rc)
624 			return rc;
625 		if (!alloc)
626 			base_crst_free(table);
627 	} while (rste++, addr = next, addr < end);
628 	return 0;
629 }
630 
631 static int base_region1_walk(unsigned long *origin, unsigned long addr,
632 			     unsigned long end, int alloc)
633 {
634 	unsigned long *rfte, next, *table;
635 	int rc;
636 
637 	rfte = origin;
638 	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
639 	do {
640 		next = base_region1_addr_end(addr, end);
641 		if (*rfte & _REGION_ENTRY_INVALID) {
642 			if (!alloc)
643 				continue;
644 			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
645 			if (!table)
646 				return -ENOMEM;
647 			*rfte = __pa(table) | _REGION1_ENTRY;
648 		}
649 		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
650 		rc = base_region2_walk(table, addr, next, alloc);
651 		if (rc)
652 			return rc;
653 		if (!alloc)
654 			base_crst_free(table);
655 	} while (rfte++, addr = next, addr < end);
656 	return 0;
657 }
658 
659 /**
660  * base_asce_free - free asce and tables returned from base_asce_alloc()
661  * @asce: asce to be freed
662  *
663  * Frees all region, segment, and page tables that were allocated with a
664  * corresponding base_asce_alloc() call.
665  */
666 void base_asce_free(unsigned long asce)
667 {
668 	unsigned long *table = __va(asce & _ASCE_ORIGIN);
669 
670 	if (!asce)
671 		return;
672 	switch (asce & _ASCE_TYPE_MASK) {
673 	case _ASCE_TYPE_SEGMENT:
674 		base_segment_walk(table, 0, _REGION3_SIZE, 0);
675 		break;
676 	case _ASCE_TYPE_REGION3:
677 		base_region3_walk(table, 0, _REGION2_SIZE, 0);
678 		break;
679 	case _ASCE_TYPE_REGION2:
680 		base_region2_walk(table, 0, _REGION1_SIZE, 0);
681 		break;
682 	case _ASCE_TYPE_REGION1:
683 		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
684 		break;
685 	}
686 	base_crst_free(table);
687 }
688 
689 static int base_pgt_cache_init(void)
690 {
691 	static DEFINE_MUTEX(base_pgt_cache_mutex);
692 	unsigned long sz = _PAGE_TABLE_SIZE;
693 
694 	if (base_pgt_cache)
695 		return 0;
696 	mutex_lock(&base_pgt_cache_mutex);
697 	if (!base_pgt_cache)
698 		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
699 	mutex_unlock(&base_pgt_cache_mutex);
700 	return base_pgt_cache ? 0 : -ENOMEM;
701 }
702 
703 /**
704  * base_asce_alloc - create kernel mapping without enhanced DAT features
705  * @addr: virtual start address of kernel mapping
706  * @num_pages: number of consecutive pages
707  *
708  * Generate an asce, including all required region, segment and page tables,
709  * that can be used to access the virtual kernel mapping. The difference is
710  * that can be used to access the virtual kernel mapping. Unlike a normal
711  * asce, the returned asce does not make use of any enhanced DAT features
712  * such as large pages. This is required for some I/O functions that pass an
713  * asce, such as some service call requests.
714  * Note: the returned asce may NEVER be attached to any cpu. It may only be
715  *	 used for I/O requests. TLB entries that might result if the asce
716  *	 were attached to a cpu won't be cleared.
717  */
718 unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
719 {
720 	unsigned long asce, *table, end;
721 	int rc;
722 
723 	if (base_pgt_cache_init())
724 		return 0;
725 	end = addr + num_pages * PAGE_SIZE;
726 	if (end <= _REGION3_SIZE) {
727 		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
728 		if (!table)
729 			return 0;
730 		rc = base_segment_walk(table, addr, end, 1);
731 		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
732 	} else if (end <= _REGION2_SIZE) {
733 		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
734 		if (!table)
735 			return 0;
736 		rc = base_region3_walk(table, addr, end, 1);
737 		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
738 	} else if (end <= _REGION1_SIZE) {
739 		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
740 		if (!table)
741 			return 0;
742 		rc = base_region2_walk(table, addr, end, 1);
743 		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
744 	} else {
745 		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
746 		if (!table)
747 			return 0;
748 		rc = base_region1_walk(table, addr, end, 1);
749 		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
750 	}
751 	if (rc) {
752 		base_asce_free(asce);
753 		asce = 0;
754 	}
755 	return asce;
756 }
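
/*
 * Usage sketch (illustrative caller, names are hypothetical): a driver
 * that must hand a non-enhanced-DAT asce to some I/O or service call
 * interface could pair the two helpers like this:
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	issue_request(asce, ...);	// hypothetical consumer of the asce
 *	base_asce_free(asce);
 *
 * The tables behind the asce translate only the requested range of the
 * kernel mapping, and the asce must never be attached to a cpu (see the
 * note above base_asce_alloc()).
 */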
757