xref: /openbmc/linux/arch/s390/mm/pgalloc.c (revision 2d972b6a)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

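/*
 * The vm.allocate_pgste sysctl: when set, every new mm allocates its
 * page tables as full 4K pages with page status table entries (PGSTEs)
 * right away, which the SIE instruction used by KVM needs. Keeping it
 * off lets normal processes use the more compact 2K page table
 * fragments.
 */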
static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

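/*
 * Region and segment (CRST) tables: 2048 entries of 8 bytes each, i.e.
 * 16K spread over four pages, hence the order-2 allocation. The table
 * is returned by its physical address, which on s390 is identical to
 * the virtual address in the kernel's 1:1 mapping of memory.
 */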
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

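/*
 * Add region table levels on top of the current page table until the
 * address space limit (mm->context.asce_limit) covers "end". Each new
 * top level table is installed under mm->page_table_lock; once at
 * least one level was added, the new ASCE is loaded on every CPU that
 * currently runs this mm via on_each_cpu(__crst_table_upgrade).
 */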
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

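/*
 * Toggle the given bits in an atomic_t without taking a lock. The
 * atomic counter of a page table page (page->_mapcount) is used as a
 * small bit field that tracks the state of the two 2K halves of the
 * page, so a cmpxchg loop is all that is needed.
 */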
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

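/*
 * Allocate a full 4K page table with PGSTEs: the lower 2K hold the 256
 * page table entries (initialized to invalid), the upper 2K hold the
 * corresponding page status table entries (PGSTEs, zeroed) that KVM
 * uses for guest storage key and status handling.
 */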
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
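/*
 * A 4K page holds two 2K page table fragments. Bits 0 and 1 of
 * page->_mapcount track which of the two halves are allocated, bits 4
 * and 5 mark halves that were released with page_table_free_rcu() and
 * are still waiting for the RCU grace period to expire. Pages with an
 * unallocated half stay on mm->context.pgtable_list for reuse, but a
 * half that is still pending its grace period is never handed out
 * again. Page tables with PGSTEs always use a full 4K page and do not
 * take part in the fragment handling.
 */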
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

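/*
 * Deferred variant of page_table_free() used when ptes of a user
 * address space are unmapped: the 2K fragment (or 4K pgste table) may
 * still be seen by a concurrent lockless page table walker, so the
 * actual free is delayed via tlb_remove_table(). Which kind of table
 * is queued is encoded in the two low bits of the pointer and decoded
 * again in __tlb_remove_table().
 */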
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

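/*
 * Final free of a table queued with tlb_remove_table(). The two low
 * bits of the pointer say what to free: 0 is a 16K CRST table (pmd,
 * pud or p4d), 1 or 2 is one 2K half of a 4K page table, 3 is a full
 * 4K page table with PGSTEs. A 2K half only releases the page once
 * the other half is no longer in use either.
 */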
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

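/*
 * Queue a page table for deferred freeing. The table pointers are
 * collected in a batch that is released through call_rcu_sched() in
 * tlb_table_flush(); if no batch page can be allocated the mm is
 * flushed and the table is freed after an explicit IPI round trip in
 * tlb_remove_table_one() instead.
 */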
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

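/*
 * Round "addr" up to the next SIZE boundary but never beyond "end".
 * The comparison is done on the decremented values so that a "next"
 * that wrapped around to 0 at the very top of the address space is
 * still treated as being past "end".
 */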
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

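/*
 * Load Real Address: let the hardware translate "address" through the
 * currently attached DAT tables and return the resulting real address.
 * The base page tables are filled with these values so that they map
 * every virtual address to the same real page as the normal kernel
 * mapping, just without EDAT large pages.
 */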
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

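/*
 * The base_*_walk() functions below all follow the same pattern: with
 * alloc set they walk the address range, allocate the next lower table
 * level on demand and, at the page table level, fill in the real
 * addresses; with alloc clear they walk the same range again and free
 * the lower level tables, which is what base_asce_free() relies on.
 */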
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}