// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_sem lock held and there is
	 * no reason to optimize for the case of otherwise. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */
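/*
 * When a mm does not use PGSTEs (mm_alloc_pgste() is false), each 4K page
 * is split into two 2K page table fragments. Which fragments are in use is
 * tracked in the byte above the normal page reference count, i.e. bits
 * 24-31 of page->_refcount, updated with atomic_xor_bits() above:
 *
 *   bit 24+n (n = 0 or 1): fragment n is allocated
 *   bit 28+n:              fragment n was released via page_table_free_rcu()
 *                          and still awaits __tlb_remove_table()
 *
 * Pages that still have a free fragment are kept on
 * mm->context.pgtable_list so that page_table_alloc() can reuse them.
 */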
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
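/*
 * The table pointer handed to tlb_remove_table() carries a tag in its two
 * low bits that tells __tlb_remove_table() what to release once the TLB
 * flush has happened:
 *
 *   0 - a CRST table (pmd, pud or p4d), freed as an order-2 allocation
 *   1 - the lower 2K fragment of a 4K page table
 *   2 - the upper 2K fragment of a 4K page table
 *   3 - a full 4K page table with PGSTEs
 *
 * Tags 1, 2 and 3 are set by page_table_free_rcu() above; tag 0 is used
 * for region and segment tables, which are queued outside this file.
 */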
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		fallthrough;
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
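/*
 * The base_*_walk() helpers below serve both to build and to tear down the
 * base tables: with alloc != 0 they allocate the next lower table whenever
 * an entry is still invalid and then descend into it, with alloc == 0 they
 * only descend and free the lower level table afterwards. At the lowest
 * level, base_page_walk() fills each PTE with the real address that LRA
 * reports for the virtual address, so the generated tables reproduce the
 * current kernel mapping without any enhanced DAT features.
 */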
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}
static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
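/*
 * Hypothetical usage sketch (not part of this file): a caller that must
 * hand an ASCE without enhanced DAT to a service call could do roughly the
 * following; "buf" and "nr_pages" are illustrative names only.
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long) buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce along with the I/O or service call request ...
 *	base_asce_free(asce);
 */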