// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
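
/*
 * A sketch of the upgrade path: an mm using a three level scheme
 * (asce_limit == _REGION2_SIZE, 4TB) that has to reach an address above
 * _REGION1_SIZE (8PB) passes through the loop above twice. The first
 * iteration puts a region-second table on top (asce_limit = 8PB), the
 * second a region-first table (asce_limit = -PAGE_SIZE, ~16EB). Each
 * iteration allocates a new top level crst table whose only populated
 * entry points to the old top level table.
 */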

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
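
/*
 * A 4K page holds two 2K page table fragments. Their allocation state
 * is tracked in the upper byte of page->_refcount (a summary derived
 * from the code below):
 *
 *	bit 24: lower 2K fragment is allocated
 *	bit 25: upper 2K fragment is allocated
 *	bit 28: lower 2K fragment is pending free (page_table_free_rcu)
 *	bit 29: upper 2K fragment is pending free
 *
 * For a 4K page table with pgstes both bits 24 and 25 are set. Pages
 * that still have a fragment available are kept on the per-mm
 * mm->context.pgtable_list, which page_table_alloc() checks first.
 */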

unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
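
/*
 * The low two bits of a table pointer queued with tlb_remove_table()
 * encode what __tlb_remove_table() has to free once the grace period
 * has passed:
 *
 *	0: a 4K-aligned crst table (pmd, pud or p4d), an order-2 page
 *	1: the lower 2K fragment of a page table
 *	2: the upper 2K fragment of a page table
 *	3: a complete 4K page table with pgstes
 */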

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
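
/*
 * A sketch of the RCU free path for a 2K page table fragment, assuming
 * the refcount encoding described above: page_table_free_rcu() flips
 * the fragment from "allocated" to "pending free" and queues a tagged
 * pointer with tlb_remove_table(). Once the grace period has expired,
 * tlb_remove_table_rcu() hands each table to __tlb_remove_table(),
 * which clears the pending bit and releases the page as soon as both
 * fragments are gone.
 */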

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
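
/*
 * base_lra() below uses the LRA (Load Real Address) instruction to
 * translate a virtual address through the DAT tables currently attached
 * to the CPU. base_page_walk() uses it to fill the base page tables
 * with the real addresses that back the kernel mapping.
 */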

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
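
/*
 * Example usage (an illustrative sketch, not taken from a caller in
 * this file): build a basic asce for an I/O request that cannot deal
 * with enhanced DAT features, and tear it down again afterwards:
 *
 *	asce = base_asce_alloc((unsigned long) buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce to the I/O or service call request ...
 *	base_asce_free(asce);
 */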