// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>

#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid. This achieves a constant
 * relationship between the address of a struct page and its PFN.
 *
 * During boot, or during a memory hotplug operation when a new memory
 * section is added, physical memory allocation (including hash table
 * bolting) is performed only for the set of struct pages which are part
 * of that memory section. This saves memory by not allocating struct
 * pages for PFNs which are not valid.
 *
 *		----------------------------------------------
 *		| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *		----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       |   +--> |  page struct |
 *  |      +--------------+ |       |   |    +--------------+
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *		-----------------------------------------
 *		| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *		-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
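/*
 * Illustrative sketch (for clarity, not code used by this file): because
 * struct pages are laid out linearly from the vmemmap base, translating
 * between a struct page and its PFN is simple pointer arithmetic,
 * roughly:
 *
 *	struct page *page = vmemmap + pfn;
 *	unsigned long pfn = page - vmemmap;
 *
 * The kernel expresses this via the generic pfn_to_page()/page_to_pfn()
 * helpers when CONFIG_SPARSEMEM_VMEMMAP is enabled.
 */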
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and adds an
 * entry to the HPT, possibly bolting it.
 */
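/*
 * Illustrative call path (a sketch, not a definitive trace; the
 * intermediate helpers vary between kernel versions):
 *
 *	ioremap(pa, size)
 *	  -> map_kernel_page(ea, pa, prot)
 *	    -> hash__map_kernel_page(ea, pa, prot)
 */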
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			pr_err("Failed to do bolted mapping IO memory at %016lx!\n",
			       pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to a hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_lock. But we could very well be in a
	 * hash_page with a local ptep pointer value. Such a hash_page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_lock and
	 * hence waits for collapse to complete. Without this,
	 * __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
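/*
 * Layout sketch (illustrative): hash PMD tables are allocated with room
 * for a second half that is never interpreted as PMD entries, so the
 * deposited pgtable pointer for a given PMD entry lives at the mirrored
 * slot PTRS_PER_PMD entries further on:
 *
 *	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
 *
 * The hash fault path uses that deposited page table to remember which
 * hash slots back the individual base pages of the hugepage.
 */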
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * We store the pgtable in the second half of the PMD.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at the pmd level. The hash fault code
	 * looks at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* Get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details that the
	 * hash fault code looks at.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}

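/*
 * Explanatory note for the checks below: mmu_psize_defs[] describes the
 * page sizes the hash MMU supports, and penc[] gives the hardware
 * encoding used when an actual page size is mapped inside a segment of
 * a given base page size, with -1 meaning that combination is not
 * supported. So THP requires 16M pages to exist, to line up with
 * PMD_SHIFT, and to be encodable under the configured base page size.
 */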
int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepages in a
	 * segment with a base page size of 64K or 4K. We only enable
	 * THP with a PAGE_SIZE of 64K.
	 */
	/*
	 * If we have 64K HPTEs, we will be using that by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTEs.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX

struct change_memory_parms {
	unsigned long start, end, newpp;
	unsigned int step, nr_cpus, master_cpu;
	atomic_t cpu_counter;
};

// We'd rather this was on the stack but it has to be in the RMO
static struct change_memory_parms chmem_parms;

// And therefore we need a lock to protect it from concurrent use
static DEFINE_MUTEX(chmem_lock);

static void change_memory_range(unsigned long start, unsigned long end,
				unsigned int step, unsigned long newpp)
{
	unsigned long idx;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);
}

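// Explanatory note on the protocol implemented below: on an LPAR the
// bolted HPTEs covering the kernel linear mapping are rewritten while
// every other CPU waits in real mode, so that no CPU is translating
// through those entries mid-update. Under stop_machine(), each secondary
// clears MSR[IR|DR] and decrements cpu_counter, the master waits for the
// count to drop to one, performs the update, then decrements the counter
// to release the secondaries, which restore the MSR and return.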
static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
{
	unsigned long msr, tmp, flags;
	int *p;

	p = &parms->cpu_counter.counter;

	local_irq_save(flags);
	hard_irq_disable();

	asm volatile (
	// Switch to real mode and leave interrupts off
	"mfmsr	%[msr]			;"
	"li	%[tmp], %[MSR_IR_DR]	;"
	"andc	%[tmp], %[msr], %[tmp]	;"
	"mtmsrd	%[tmp]			;"

	// Tell the master we are in real mode
	"1:	"
	"lwarx	%[tmp], 0, %[p]		;"
	"addic	%[tmp], %[tmp], -1	;"
	"stwcx.	%[tmp], 0, %[p]		;"
	"bne-	1b			;"

	// Spin until the counter goes to zero
	"2:				;"
	"lwz	%[tmp], 0(%[p])		;"
	"cmpwi	%[tmp], 0		;"
	"bne-	2b			;"

	// Switch back to virtual mode
	"mtmsrd	%[msr]			;"

	: // outputs
	  [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
	: // inputs
	  [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
	: // clobbers
	  "cc", "xer"
	);

	local_irq_restore(flags);

	return 0;
}

static int change_memory_range_fn(void *data)
{
	struct change_memory_parms *parms = data;

	if (parms->master_cpu != smp_processor_id())
		return chmem_secondary_loop(parms);

	// Wait for all but one CPU (this one) to call-in
	while (atomic_read(&parms->cpu_counter) > 1)
		barrier();

	change_memory_range(parms->start, parms->end, parms->step, parms->newpp);

	mb();

	// Signal the other CPUs that we're done
	atomic_dec(&parms->cpu_counter);

	return 0;
}

static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		mutex_lock(&chmem_lock);

		chmem_parms.start = start;
		chmem_parms.end = end;
		chmem_parms.step = step;
		chmem_parms.newpp = newpp;
		chmem_parms.master_cpu = smp_processor_id();

		cpus_read_lock();

		atomic_set(&chmem_parms.cpu_counter, num_online_cpus());

		// Ensure state is consistent before we call the other CPUs
		mb();

		stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
					cpu_online_mask);

		cpus_read_unlock();
		mutex_unlock(&chmem_lock);
	} else
		change_memory_range(start, end, step, newpp);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);

	WARN_ON(!hash__change_memory_range(start, end, pp));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif