// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space, irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot or memory hotplug operation when a new memory section is
 * added, physical memory allocation (including hash table bolting) will
 * be performed for the set of struct pages which are part of the memory
 * section. This saves memory by not allocating struct pages for PFNs
 * which are not valid.
 *
 *		----------------------------------------------
 *		| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *		----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+         |        +--------------+
 *  |      |  page struct |         |   +--> |  page struct |
 *  |      +--------------+         |   |    +--------------+
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *		-----------------------------------------
 *		| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *		-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
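/*
 * Rough sketch (pseudo-code, illustrative only) of the constant
 * relationship described above: because the struct pages are laid out
 * virtually contiguous starting at vmemmap, the SPARSEMEM_VMEMMAP PFN
 * conversions reduce to plain pointer arithmetic, roughly:
 *
 *	struct page *page = vmemmap + pfn;	// pfn_to_page()
 *	unsigned long pfn = page - vmemmap;	// page_to_pfn()
 */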
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

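/*
 * Illustrative C-level sketch (not actual code) of what the ldarx/stdcx.
 * loop in hash__pmd_hugepage_update() below implements; load_reserve()
 * and store_conditional() are hypothetical stand-ins for the
 * load-reserve/store-conditional pair, and the values are big-endian
 * (__be64) in the real code:
 *
 *	for (;;) {
 *		old = load_reserve(pmdp);		// ldarx
 *		if (old & H_PAGE_BUSY)			// and. ; bne- 1b
 *			continue;
 *		new = (old & ~clr) | set;		// andc ; or
 *		if (store_conditional(pmdp, new))	// stdcx. ; bne- 1b
 *			break;
 *	}
 *	return old;
 */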
unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

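/*
 * Illustrative layout (a rough picture, not to scale) of what the
 * deposit/withdraw helpers below rely on: the PMD table is allocated with
 * room for a second half, and the deposited pgtable pointer for pmd[i]
 * lives at the parallel index in that second half, which is what
 * "(pgtable_t *)pmdp + PTRS_PER_PMD" computes:
 *
 *	+---------------------------+---------------------------+
 *	| pmd[0] .. pmd[N-1]        | slot[0] .. slot[N-1]      |
 *	+---------------------------+---------------------------+
 *	  N = PTRS_PER_PMD            slot[i] holds the deposited
 *	                              pgtable for pmd[i]
 */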
/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at pmd level: the hash fault code looks at
	 * the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Let's zero out old valid and hash index details;
	 * the hash fault code looks at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_current_mm_pte variants which do lockless
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

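/*
 * Background for the checks in hash__has_transparent_hugepage() below
 * (an explanatory note, not a definitive reference):
 * mmu_psize_defs[base].penc[actual] holds the HPTE page-size encoding for
 * mapping an 'actual' page size within a segment whose base page size is
 * 'base'; -1 means that combination cannot be encoded. THP (16M PMDs) is
 * therefore only reported when a 16M encoding exists for the HPTE size in
 * use: 64K HPTEs when available, otherwise 4K HPTEs.
 */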
int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif