// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

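/*
 * The "vm.allocate_pgste" sysctl below forces new page tables to be
 * allocated as full 4KB pages that carry PGSTEs (the per-PTE guest
 * storage extensions KVM needs for its guest mappings), instead of
 * the default 2KB fragments.
 */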
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

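/*
 * CRST (region and segment) tables hold 2048 eight-byte entries, i.e.
 * 16KB, hence the order-2 page allocation via CRST_ALLOC_ORDER.
 */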
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);

	if (!page)
		return NULL;
	arch_set_page_dat(page, CRST_ALLOC_ORDER);
	return (unsigned long *) page_to_virt(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}

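/*
 * Add a higher-level region table on top of an existing hierarchy so
 * that addresses above the current asce_limit become usable. The new
 * top-level table simply points to the old one, so no existing entries
 * move; mm->pgd and the ASCE are switched under mm->page_table_lock,
 * after which every CPU running this mm reloads the new ASCE into
 * control register 7 via __crst_table_upgrade().
 */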
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that were ever to change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

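/*
 * Flip @bits in *@v and return the resulting value of the bit field.
 * For example, XOR-ing 0x11 into a field that holds 0x01 clears the
 * allocation bit and sets the pending-removal bit in a single atomic
 * operation; the caller sees the new value 0x10.
 */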
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	return atomic_fetch_xor(bits, v) ^ bits;
}

#ifdef CONFIG_PGSTE

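/*
 * PGSTE page tables are always full 4KB pages: 2KB of PTEs followed by
 * 2KB of PGSTEs. The PTE half is initialized to _PAGE_INVALID, the
 * PGSTE half to zero.
 */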
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_virt(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * A 2KB-pgtable is either the upper or the lower half of a normal page.
 * The second half of the page may be unused or used as another
 * 2KB-pgtable.
 *
 * Whenever possible the parent page for a new 2KB-pgtable is picked
 * from the list of partially allocated pages mm_context_t::pgtable_list.
 * In case the list is empty a new parent page is allocated and added to
 * the list.
 *
 * When a parent page gets fully allocated it contains 2KB-pgtables in both
 * upper and lower halves and is removed from mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a fully allocated parent page, that
 * page turns partially allocated and is added to mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a partially allocated parent page, that
 * page turns unused and is removed from mm_context_t::pgtable_list.
 * Furthermore, the unused parent page is released.
 *
 * It follows from the above that mm_context_t::pgtable_list contains
 * no unallocated and no fully allocated parent pages.
 *
 * The upper byte (bits 24-31) of the parent page _refcount is used
 * for tracking contained 2KB-pgtables and has the following format:
 *
 *   PP  AA
 * 01234567    upper byte (bits 24-31) of struct page::_refcount
 *   ||  ||
 *   ||  |+--- lower 2KB-pgtable is allocated
 *   ||  +---- upper 2KB-pgtable is allocated
 *   |+------- lower 2KB-pgtable is pending for removal
 *   +-------- upper 2KB-pgtable is pending for removal
 *
 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
 * using _refcount is possible).
 *
 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still unallocated;
 *   - removed from mm_context_t::pgtable_list in case both halves of the
 *     parent page are allocated;
 * These operations are protected with mm_context_t::lock.
 *
 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
 * and the corresponding PP bit is set to 1 in a single atomic operation.
 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
 * exclusive and may never be both set to 1!
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still allocated;
 *   - removed from mm_context_t::pgtable_list in case the second half of
 *     the parent page is unallocated;
 * These operations are protected with mm_context_t::lock.
 *
 * It is important to understand that mm_context_t::lock only protects
 * mm_context_t::pgtable_list and the AA bits, but not the parent page
 * itself and not the PP bits.
 *
 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
 * while both AA bits and the second PP bit are already unset. Then the
 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
 * also been removed from mm_context_t::pgtable_list. It is therefore safe
 * to release the page.
 *
 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable,
 * while the PP bits are never used, nor is such a page ever added to or
 * removed from mm_context_t::pgtable_list.
 */
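
/*
 * Example life cycle of the tracking byte for a non-PGSTE parent page:
 *   0x00 -> 0x01  first (lower) 2KB fragment handed out, page is added
 *                 to pgtable_list
 *   0x01 -> 0x03  second (upper) fragment handed out, page is removed
 *                 from pgtable_list
 *   0x03 -> 0x12  lower fragment freed: AA bit 0x01 cleared and PP bit
 *                 0x10 set atomically, page re-enters pgtable_list
 *   0x12 -> 0x02  PP bit cleared again, by page_table_free() itself or,
 *                 on the RCU path, by __tlb_remove_table() after the
 *                 grace period
 */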
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			/*
			 * The pending removal bits must also be checked.
			 * Failure to do so might lead to an impossible
			 * value (e.g. 0x13 or 0x23) being written to
			 * _refcount. Such values violate the assumption
			 * that pending and allocation bits are mutually
			 * exclusive, and the rest of the code derails as
			 * a result. That could lead to a whole bunch of
			 * races and corruptions.
			 */
			mask = (mask | (mask >> 4)) & 0x03U;
			if (mask != 0x03U) {
				table = (unsigned long *) page_to_virt(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							0x01U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_virt(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 0x03U << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 0x01U << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

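/*
 * Debugging aid, compiled in with CONFIG_DEBUG_VM only: dump the page
 * if any tracking bits are still set at final release time, which
 * would indicate a corrupted 2KB-pgtable state machine.
 */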
static void page_table_release_check(struct page *page, void *table,
				     unsigned int half, unsigned int mask)
{
	char msg[128];

	if (!IS_ENABLED(CONFIG_DEBUG_VM) || !mask)
		return;
	snprintf(msg, sizeof(msg),
		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
		 table, half, mask);
	dump_page(page, msg);
}

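/*
 * Free a 2KB fragment or a full 4KB page table immediately, without an
 * RCU grace period. Only safe when no other CPU can still be walking
 * the table; otherwise page_table_free_rcu() must be used.
 */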
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned int mask, bit, half;
	struct page *page;

	page = virt_to_page(table);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		/*
		 * Mark the page for delayed release. The actual release
		 * will happen outside of the critical section, from this
		 * function or from __tlb_remove_table().
		 */
		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if (mask & 0x03U)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		half = 0x01U << bit;
	} else {
		half = 0x03U;
		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
		mask >>= 24;
	}

	page_table_release_check(page, table, half, mask);
	pgtable_pte_page_dtor(page);
	__free_page(page);
}

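/*
 * Queue a page table for release after the associated TLB flush and
 * RCU grace period. Which part is being freed (one 2KB fragment or a
 * full 4KB PGSTE table) is encoded into the low bits of the table
 * pointer for __tlb_remove_table() to decode.
 */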
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = virt_to_page(table);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 0x03U);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/*
	 * Mark the page for delayed release. The actual release will happen
	 * outside of the critical section, from __tlb_remove_table() or from
	 * page_table_free().
	 */
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 0x03U)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
	tlb_remove_table(tlb, table);
}

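/*
 * Final release stage, reached after the RCU grace period: the low two
 * bits of the tagged pointer identify what is freed - a CRST table, a
 * single 2KB fragment, or a full 4KB page table with PGSTEs.
 */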
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = virt_to_page(table);

	switch (half) {
	case 0x00U:	/* pmd, pud, or p4d */
		free_pages((unsigned long)table, CRST_ALLOC_ORDER);
		return;
	case 0x01U:	/* lower 2K of a 4K page table */
	case 0x02U:	/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		break;
	case 0x03U:	/* 4K page table with pgstes */
		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
		mask >>= 24;
		break;
	}

	page_table_release_check(page, table, half, mask);
	pgtable_pte_page_dtor(page);
	__free_page(page);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = (unsigned long *)__get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
}

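/*
 * base_<level>_addr_end(addr, end) returns the smaller of @end and the
 * next <level> boundary above @addr. Comparing 'next - 1' with
 * 'end - 1' keeps the result correct even when 'next' wraps around to
 * 0 at the very end of the address space.
 */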
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

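/*
 * LRA (load real address) translates a virtual address through the
 * current DAT tables. It is used below to fill the base page tables
 * with the real addresses that back the kernel mapping.
 */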
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

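/*
 * The base_*_walk() functions below do double duty: called with
 * alloc=1 they build the table hierarchy for [addr, end), called with
 * alloc=0 they walk an existing hierarchy and free every lower-level
 * table they visit.
 */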
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that would result from attaching
 *	 the asce to a cpu are never cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
699