// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);

	return false;
}

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker. */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
			   min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
			   min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default, disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than the TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
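	/*
	 * The request is padded by one extra @size so the resulting area can
	 * be shifted up to a @size-aligned file offset below. Bail out if the
	 * padded length or the padded file offset would overflow.
	 */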
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			vm_fault_t ret2;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret2 & VM_FAULT_FALLBACK);
			return ret2;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	mem_cgroup_cancel_charge(page, memcg, true);
	put_page(page);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

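/*
 * Map the global huge zero page at @haddr and consume the deposited page
 * table (if one is passed) so that a later split can refill the PTEs.
 * Returns false if something is already mapped at @pmd.
 */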
/* Caller must hold the page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

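/*
 * Insert a PMD-sized pfn mapping at the PMD-aligned address covering
 * vmf->address. Intended for VM_PFNMAP/VM_MIXEDMAP (typically devmap)
 * mappings. Returns VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS if the
 * address falls outside the VMA, or VM_FAULT_OOM if a page table cannot
 * be preallocated on architectures that require a deposit.
 */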
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if it can be re-filled on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (is_write_migration_entry(entry)) {
			make_migration_entry_read(&entry);
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When the page table lock is held, the huge zero pmd should not be
	 * under splitting, since we don't split the page itself, only the pmd
	 * to a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (flags & FOLL_WRITE)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				pud, _pud, flags & FOLL_WRITE))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When the page table lock is held, the huge zero pud should not be
	 * under splitting, since we don't split the page itself, only the pud
	 * to a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	pud_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	entry = pud_mkyoung(orig_pud);
	if (write)
		entry = pud_mkdirty(entry);
	haddr = vmf->address & HPAGE_PUD_MASK;
	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);

unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}

static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
			pmd_t orig_pmd, struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;
	vm_fault_t ret = 0;
	struct page **pages;
	struct mmu_notifier_range range;

	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
			      GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
					       vmf->address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge_delay(pages[i], vma->vm_mm,
				     GFP_KERNEL, &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg,
						false);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/*
	 * Leave the pmd empty until the pte entries are filled in. Note that
	 * we must notify here, as a concurrent CPU thread might write to the
	 * new page before the call to
	 * mmu_notifier_invalidate_range_end() happens, which can lead to a
	 * device seeing the memory write in a different order than the CPU.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);

	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
	pmd_populate(vma->vm_mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		vmf->pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*vmf->pte));
		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
		pte_unmap(vmf->pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
	page_remove_rmap(page, true);
	spin_unlock(vmf->ptl);

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pmdp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(&range);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(vmf->ptl);
	mmu_notifier_invalidate_range_end(&range);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct mmu_notifier_range range;
	gfp_t huge_gfp;			/* for allocation and charge */
	vm_fault_t ret = 0;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	/*
	 * We can only reuse the page if nobody else maps the huge page or
	 * part of it.
	 */
	if (!trylock_page(page)) {
		get_page(page);
		spin_unlock(vmf->ptl);
		lock_page(page);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			unlock_page(page);
			put_page(page);
			goto out_unlock;
		}
		put_page(page);
	}
	if (reuse_swap_page(page, NULL)) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		ret |= VM_FAULT_WRITE;
		unlock_page(page);
		goto out_unlock;
	}
	unlock_page(page);
	get_page(page);
	spin_unlock(vmf->ptl);
alloc:
	if (__transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (likely(new_page)) {
		prep_transhuge_page(new_page);
	} else {
		if (!page) {
			split_huge_pmd(vma, vmf->pmd, vmf->address);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
			if (ret & VM_FAULT_OOM) {
				split_huge_pmd(vma, vmf->pmd, vmf->address);
				ret |= VM_FAULT_FALLBACK;
			}
			put_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm,
					huge_gfp, &memcg, true))) {
		put_page(new_page);
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		if (page)
			put_page(page);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);
	count_memcg_events(memcg, THP_FAULT_ALLOC, 1);

	if (!page)
		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, vmf->address,
				    vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	spin_lock(vmf->ptl);
	if (page)
		put_page(page);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		mem_cgroup_cancel_charge(new_page, memcg, true);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
		page_add_new_anon_rmap(new_page, vma, haddr, true);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		if (!page) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page, true);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(vmf->ptl);
out_mn:
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pmdp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(&range);
out:
	return ret;
out_unlock:
	spin_unlock(vmf->ptl);
	return ret;
}

/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * We don't mlock() pte-mapped THPs. This way we can avoid
		 * leaking mlocked pages into non-VM_LOCKED VMAs.
		 *
		 * For anon THP:
		 *
		 * In most cases the pmd is the only mapping of the page as we
		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
		 * writable private mappings in populate_vma_page_range().
		 *
		 * The only scenario when we have the page shared here is if we
		 * are mlocking a read-only mapping shared over fork(). We skip
		 * mlocking such pages.
		 *
		 * For file THP:
		 *
		 * We can expect PageDoubleMap() to be stable under page lock:
		 * for file pages we set it in page_add_file_rmap(), which
		 * requires page to be locked.
		 */

		if (PageAnon(page) && compound_mapcount(page) != 1)
			goto skip_mlock;
		if (PageDoubleMap(page) || !page->mapping)
			goto skip_mlock;
		if (!trylock_page(page))
			goto skip_mlock;
		lru_add_drain();
		if (page->mapping && !PageDoubleMap(page))
			mlock_vma_page(page);
		unlock_page(page);
	}
skip_mlock:
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	bool was_writable;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
		page = pmd_page(*vmf->pmd);
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		put_and_wait_on_page_locked(page);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/* See similar comment in do_numa_page for explanation */
	if (!pmd_savedwrite(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid dropping
	 * page_table_lock if at all possible
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == NUMA_NO_NODE) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		page_nid = NUMA_NO_NODE;
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		put_and_wait_on_page_locked(page);
		goto out;
	}

	/*
	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
	 * to serialise splits.
	 */
	get_page(page);
	spin_unlock(vmf->ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while page_table_lock was released */
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
		unlock_page(page);
		put_page(page);
		page_nid = NUMA_NO_NODE;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = NUMA_NO_NODE;
		goto clear_pmdnuma;
	}

	/*
	 * Since we took the NUMA fault, we must have observed the !accessible
	 * bit. Make sure all other CPUs agree with that, to avoid them
	 * modifying the page we're about to migrate.
	 *
	 * Must be done under PTL such that we'll observe the relevant
	 * inc_tlb_flush_pending().
	 *
	 * We are not sure a pending tlb flush here is for a huge page
	 * mapping or not. Hence use the tlb range variant
	 */
	if (mm_tlb_flush_pending(vma->vm_mm)) {
		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
		/*
		 * change_huge_pmd() released the pmd lock before
		 * invalidating the secondary MMUs sharing the primary
		 * MMU pagetables (with ->invalidate_range()). The
		 * mmu_notifier_invalidate_range_end() (which
		 * internally calls ->invalidate_range()) in
		 * change_pmd_range() will run after us, so we can't
		 * rely on it here and we need an explicit invalidate.
		 */
		mmu_notifier_invalidate_range(vma->vm_mm, haddr,
					      haddr + HPAGE_PMD_SIZE);
	}

	/*
	 * Migrate the THP to the requested node, returns with page unlocked
	 * and access rights restored.
	 */
	spin_unlock(vmf->ptl);

	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
				vmf->pmd, pmd, vmf->address, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else
		flags |= TNF_MIGRATE_FAIL;

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	was_writable = pmd_savedwrite(pmd);
	pmd = pmd_modify(pmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	unlock_page(page);
out_unlock:
	spin_unlock(vmf->ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;
}

/*
 * Return true if we do MADV_FREE successfully on the entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct page *page;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	page = pmd_page(orig_pmd);
	/*
	 * If other processes are mapping this page, we can't discard
	 * the page unless they all do MADV_FREE, so let's skip the page.
	 */
	if (page_mapcount(page) != 1)
		goto out;

	if (!trylock_page(page))
		goto out;

	/*
	 * If the user wants to discard part of the THP, split it so that
	 * MADV_FREE will deactivate only those parts.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		get_page(page);
		spin_unlock(ptl);
		split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out_unlocked;
	}

	if (PageDirty(page))
		ClearPageDirty(page);
	unlock_page(page);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	mark_page_lazyfree(page);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at the deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
						tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_dax(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_to_page(swp_offset(entry));
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move the preallocated
	 * PTE page table if new_pmd is on a different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}

bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
1899 */
1900 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1901 if (old_ptl) {
1902 new_ptl = pmd_lockptr(mm, new_pmd);
1903 if (new_ptl != old_ptl)
1904 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1905 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1906 if (pmd_present(pmd))
1907 force_flush = true;
1908 VM_BUG_ON(!pmd_none(*new_pmd));
1909
1910 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1911 pgtable_t pgtable;
1912 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1913 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1914 }
1915 pmd = move_soft_dirty_pmd(pmd);
1916 set_pmd_at(mm, new_addr, new_pmd, pmd);
1917 if (force_flush)
1918 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1919 if (new_ptl != old_ptl)
1920 spin_unlock(new_ptl);
1921 spin_unlock(old_ptl);
1922 return true;
1923 }
1924 return false;
1925 }
1926
1927 /*
1928 * Returns
1929 * - 0 if PMD could not be locked
1930 * - 1 if PMD was locked but protections are unchanged and no TLB flush is necessary
1931 * - HPAGE_PMD_NR if protections were changed and a TLB flush is necessary
1932 */
1933 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1934 unsigned long addr, pgprot_t newprot, int prot_numa)
1935 {
1936 struct mm_struct *mm = vma->vm_mm;
1937 spinlock_t *ptl;
1938 pmd_t entry;
1939 bool preserve_write;
1940 int ret;
1941
1942 ptl = __pmd_trans_huge_lock(pmd, vma);
1943 if (!ptl)
1944 return 0;
1945
1946 preserve_write = prot_numa && pmd_write(*pmd);
1947 ret = 1;
1948
1949 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1950 if (is_swap_pmd(*pmd)) {
1951 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1952
1953 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1954 if (is_write_migration_entry(entry)) {
1955 pmd_t newpmd;
1956 /*
1957 * A protection check is difficult so
1958 * just be safe and disable write
1959 */
1960 make_migration_entry_read(&entry);
1961 newpmd = swp_entry_to_pmd(entry);
1962 if (pmd_swp_soft_dirty(*pmd))
1963 newpmd = pmd_swp_mksoft_dirty(newpmd);
1964 set_pmd_at(mm, addr, pmd, newpmd);
1965 }
1966 goto unlock;
1967 }
1968 #endif
1969
1970 /*
1971 * Avoid trapping faults against the zero page. The read-only
1972 * data is likely to be read-cached on the local CPU and
1973 * local/remote hits to the zero page are not interesting.
1974 */
1975 if (prot_numa && is_huge_zero_pmd(*pmd))
1976 goto unlock;
1977
1978 if (prot_numa && pmd_protnone(*pmd))
1979 goto unlock;
1980
1981 /*
1982 * In the prot_numa case, we are under down_read(mmap_sem). It's critical
1983 * to not clear the pmd intermittently to avoid racing with MADV_DONTNEED,
1984 * which is also under down_read(mmap_sem):
1985 *
1986 * CPU0: CPU1:
1987 * change_huge_pmd(prot_numa=1)
1988 * pmdp_huge_get_and_clear_notify()
1989 * madvise_dontneed()
1990 * zap_pmd_range()
1991 * pmd_trans_huge(*pmd) == 0 (without ptl)
1992 * // skip the pmd
1993 * set_pmd_at();
1994 * // pmd is re-established
1995 *
1996 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
1997 * which may break userspace.
1998 *
1999 * pmdp_invalidate() is required to make sure we don't miss
2000 * dirty/young flags set by hardware.
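 *
 * (pmdp_invalidate() marks the entry invalid without clearing it, so a
 * concurrent zap_pmd_range() still sees pmd_trans_huge(*pmd) and
 * serializes on the ptl instead of skipping the range.)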
2001 */ 2002 entry = pmdp_invalidate(vma, addr, pmd); 2003 2004 entry = pmd_modify(entry, newprot); 2005 if (preserve_write) 2006 entry = pmd_mk_savedwrite(entry); 2007 ret = HPAGE_PMD_NR; 2008 set_pmd_at(mm, addr, pmd, entry); 2009 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); 2010 unlock: 2011 spin_unlock(ptl); 2012 return ret; 2013 } 2014 2015 /* 2016 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. 2017 * 2018 * Note that if it returns page table lock pointer, this routine returns without 2019 * unlocking page table lock. So callers must unlock it. 2020 */ 2021 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 2022 { 2023 spinlock_t *ptl; 2024 ptl = pmd_lock(vma->vm_mm, pmd); 2025 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || 2026 pmd_devmap(*pmd))) 2027 return ptl; 2028 spin_unlock(ptl); 2029 return NULL; 2030 } 2031 2032 /* 2033 * Returns true if a given pud maps a thp, false otherwise. 2034 * 2035 * Note that if it returns true, this routine returns without unlocking page 2036 * table lock. So callers must unlock it. 2037 */ 2038 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) 2039 { 2040 spinlock_t *ptl; 2041 2042 ptl = pud_lock(vma->vm_mm, pud); 2043 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) 2044 return ptl; 2045 spin_unlock(ptl); 2046 return NULL; 2047 } 2048 2049 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 2050 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, 2051 pud_t *pud, unsigned long addr) 2052 { 2053 spinlock_t *ptl; 2054 2055 ptl = __pud_trans_huge_lock(pud, vma); 2056 if (!ptl) 2057 return 0; 2058 /* 2059 * For architectures like ppc64 we look at deposited pgtable 2060 * when calling pudp_huge_get_and_clear. So do the 2061 * pgtable_trans_huge_withdraw after finishing pudp related 2062 * operations. 2063 */ 2064 pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm); 2065 tlb_remove_pud_tlb_entry(tlb, pud, addr); 2066 if (vma_is_dax(vma)) { 2067 spin_unlock(ptl); 2068 /* No zero page support yet */ 2069 } else { 2070 /* No support for anonymous PUD pages yet */ 2071 BUG(); 2072 } 2073 return 1; 2074 } 2075 2076 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, 2077 unsigned long haddr) 2078 { 2079 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); 2080 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2081 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); 2082 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); 2083 2084 count_vm_event(THP_SPLIT_PUD); 2085 2086 pudp_huge_clear_flush_notify(vma, haddr, pud); 2087 } 2088 2089 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 2090 unsigned long address) 2091 { 2092 spinlock_t *ptl; 2093 struct mmu_notifier_range range; 2094 2095 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 2096 address & HPAGE_PUD_MASK, 2097 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); 2098 mmu_notifier_invalidate_range_start(&range); 2099 ptl = pud_lock(vma->vm_mm, pud); 2100 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) 2101 goto out; 2102 __split_huge_pud_locked(vma, pud, range.start); 2103 2104 out: 2105 spin_unlock(ptl); 2106 /* 2107 * No need to double call mmu_notifier->invalidate_range() callback as 2108 * the above pudp_huge_clear_flush_notify() did already call it. 
2109 */ 2110 mmu_notifier_invalidate_range_only_end(&range); 2111 } 2112 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 2113 2114 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2115 unsigned long haddr, pmd_t *pmd) 2116 { 2117 struct mm_struct *mm = vma->vm_mm; 2118 pgtable_t pgtable; 2119 pmd_t _pmd; 2120 int i; 2121 2122 /* 2123 * Leave pmd empty until pte is filled note that it is fine to delay 2124 * notification until mmu_notifier_invalidate_range_end() as we are 2125 * replacing a zero pmd write protected page with a zero pte write 2126 * protected page. 2127 * 2128 * See Documentation/vm/mmu_notifier.rst 2129 */ 2130 pmdp_huge_clear_flush(vma, haddr, pmd); 2131 2132 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2133 pmd_populate(mm, &_pmd, pgtable); 2134 2135 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2136 pte_t *pte, entry; 2137 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2138 entry = pte_mkspecial(entry); 2139 pte = pte_offset_map(&_pmd, haddr); 2140 VM_BUG_ON(!pte_none(*pte)); 2141 set_pte_at(mm, haddr, pte, entry); 2142 pte_unmap(pte); 2143 } 2144 smp_wmb(); /* make pte visible before pmd */ 2145 pmd_populate(mm, pmd, pgtable); 2146 } 2147 2148 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, 2149 unsigned long haddr, bool freeze) 2150 { 2151 struct mm_struct *mm = vma->vm_mm; 2152 struct page *page; 2153 pgtable_t pgtable; 2154 pmd_t old_pmd, _pmd; 2155 bool young, write, soft_dirty, pmd_migration = false; 2156 unsigned long addr; 2157 int i; 2158 2159 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2160 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); 2161 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); 2162 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) 2163 && !pmd_devmap(*pmd)); 2164 2165 count_vm_event(THP_SPLIT_PMD); 2166 2167 if (!vma_is_anonymous(vma)) { 2168 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); 2169 /* 2170 * We are going to unmap this huge page. So 2171 * just go ahead and zap it 2172 */ 2173 if (arch_needs_pgtable_deposit()) 2174 zap_deposited_table(mm, pmd); 2175 if (vma_is_dax(vma)) 2176 return; 2177 page = pmd_page(_pmd); 2178 if (!PageDirty(page) && pmd_dirty(_pmd)) 2179 set_page_dirty(page); 2180 if (!PageReferenced(page) && pmd_young(_pmd)) 2181 SetPageReferenced(page); 2182 page_remove_rmap(page, true); 2183 put_page(page); 2184 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); 2185 return; 2186 } else if (is_huge_zero_pmd(*pmd)) { 2187 /* 2188 * FIXME: Do we want to invalidate secondary mmu by calling 2189 * mmu_notifier_invalidate_range() see comments below inside 2190 * __split_huge_pmd() ? 2191 * 2192 * We are going from a zero huge page write protected to zero 2193 * small page also write protected so it does not seems useful 2194 * to invalidate secondary mmu at this time. 2195 */ 2196 return __split_huge_zero_page_pmd(vma, haddr, pmd); 2197 } 2198 2199 /* 2200 * Up to this point the pmd is present and huge and userland has the 2201 * whole access to the hugepage during the split (which happens in 2202 * place). If we overwrite the pmd with the not-huge version pointing 2203 * to the pte here (which of course we could if all CPUs were bug 2204 * free), userland could trigger a small page size TLB miss on the 2205 * small sized TLB while the hugepage TLB entry is still established in 2206 * the huge TLB. Some CPU doesn't like that. 2207 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum 2208 * 383 on page 93. 
Intel should be safe but it also warns that it's
2209 * only safe if the permission and cache attributes of the two entries
2210 * loaded in the two TLBs are identical (which should be the case here).
2211 * But it is generally safer to never allow small and huge TLB entries
2212 * for the same virtual address to be loaded simultaneously. So instead
2213 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2214 * current pmd notpresent (atomically because here the pmd_trans_huge
2215 * must remain set at all times on the pmd until the split is complete
2216 * for this pmd), then we flush the SMP TLB and finally we write the
2217 * non-huge version of the pmd entry with pmd_populate.
2218 */
2219 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2220
2221 pmd_migration = is_pmd_migration_entry(old_pmd);
2222 if (unlikely(pmd_migration)) {
2223 swp_entry_t entry;
2224
2225 entry = pmd_to_swp_entry(old_pmd);
2226 page = pfn_to_page(swp_offset(entry));
2227 write = is_write_migration_entry(entry);
2228 young = false;
2229 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2230 } else {
2231 page = pmd_page(old_pmd);
2232 if (pmd_dirty(old_pmd))
2233 SetPageDirty(page);
2234 write = pmd_write(old_pmd);
2235 young = pmd_young(old_pmd);
2236 soft_dirty = pmd_soft_dirty(old_pmd);
2237 }
2238 VM_BUG_ON_PAGE(!page_count(page), page);
2239 page_ref_add(page, HPAGE_PMD_NR - 1);
2240
2241 /*
2242 * Withdraw the table only after we mark the pmd entry invalid.
2243 * This is critical for some architectures (Power).
2244 */
2245 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2246 pmd_populate(mm, &_pmd, pgtable);
2247
2248 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2249 pte_t entry, *pte;
2250 /*
2251 * Note that NUMA hinting access restrictions are not
2252 * transferred to avoid any possibility of altering
2253 * permissions across VMAs.
2254 */
2255 if (freeze || pmd_migration) {
2256 swp_entry_t swp_entry;
2257 swp_entry = make_migration_entry(page + i, write);
2258 entry = swp_entry_to_pte(swp_entry);
2259 if (soft_dirty)
2260 entry = pte_swp_mksoft_dirty(entry);
2261 } else {
2262 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2263 entry = maybe_mkwrite(entry, vma);
2264 if (!write)
2265 entry = pte_wrprotect(entry);
2266 if (!young)
2267 entry = pte_mkold(entry);
2268 if (soft_dirty)
2269 entry = pte_mksoft_dirty(entry);
2270 }
2271 pte = pte_offset_map(&_pmd, addr);
2272 BUG_ON(!pte_none(*pte));
2273 set_pte_at(mm, addr, pte, entry);
2274 atomic_inc(&page[i]._mapcount);
2275 pte_unmap(pte);
2276 }
2277
2278 /*
2279 * Set PG_double_map before dropping compound_mapcount to avoid
2280 * false-negative page_mapped().
2281 */
2282 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2283 for (i = 0; i < HPAGE_PMD_NR; i++)
2284 atomic_inc(&page[i]._mapcount);
2285 }
2286
2287 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2288 /* Last compound_mapcount is gone.
*/
2289 __dec_node_page_state(page, NR_ANON_THPS);
2290 if (TestClearPageDoubleMap(page)) {
2291 /* No need for the mapcount reference anymore */
2292 for (i = 0; i < HPAGE_PMD_NR; i++)
2293 atomic_dec(&page[i]._mapcount);
2294 }
2295 }
2296
2297 smp_wmb(); /* make pte visible before pmd */
2298 pmd_populate(mm, pmd, pgtable);
2299
2300 if (freeze) {
2301 for (i = 0; i < HPAGE_PMD_NR; i++) {
2302 page_remove_rmap(page + i, false);
2303 put_page(page + i);
2304 }
2305 }
2306 }
2307
2308 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2309 unsigned long address, bool freeze, struct page *page)
2310 {
2311 spinlock_t *ptl;
2312 struct mmu_notifier_range range;
2313
2314 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2315 address & HPAGE_PMD_MASK,
2316 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2317 mmu_notifier_invalidate_range_start(&range);
2318 ptl = pmd_lock(vma->vm_mm, pmd);
2319
2320 /*
2321 * If the caller asks to set up migration entries, we need a page to
2322 * check the pmd against. Otherwise we can end up replacing the wrong page.
2323 */
2324 VM_BUG_ON(freeze && !page);
2325 if (page && page != pmd_page(*pmd))
2326 goto out;
2327
2328 if (pmd_trans_huge(*pmd)) {
2329 page = pmd_page(*pmd);
2330 if (PageMlocked(page))
2331 clear_page_mlock(page);
2332 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
2333 goto out;
2334 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2335 out:
2336 spin_unlock(ptl);
2337 /*
2338 * No need to double call mmu_notifier->invalidate_range() callback.
2339 * There are 3 cases to consider inside __split_huge_pmd_locked():
2340 * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), obviously
2341 * 2) __split_huge_zero_page_pmd() deals only with the read-only zero
2342 * page, and any write fault will trigger a flush_notify before pointing
2343 * to a new page (it is fine if the secondary mmu keeps pointing to
2344 * the old zero page in the meantime)
2345 * 3) Splitting a huge pmd into ptes pointing to the same page. No need
2346 * to invalidate the secondary tlb entries; they are all still valid.
2347 * Any further changes to individual ptes will notify. So no need
2348 * to call mmu_notifier->invalidate_range()
2349 */
2350 mmu_notifier_invalidate_range_only_end(&range);
2351 }
2352
2353 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2354 bool freeze, struct page *page)
2355 {
2356 pgd_t *pgd;
2357 p4d_t *p4d;
2358 pud_t *pud;
2359 pmd_t *pmd;
2360
2361 pgd = pgd_offset(vma->vm_mm, address);
2362 if (!pgd_present(*pgd))
2363 return;
2364
2365 p4d = p4d_offset(pgd, address);
2366 if (!p4d_present(*p4d))
2367 return;
2368
2369 pud = pud_offset(p4d, address);
2370 if (!pud_present(*pud))
2371 return;
2372
2373 pmd = pmd_offset(pud, address);
2374
2375 __split_huge_pmd(vma, pmd, address, freeze, page);
2376 }
2377
2378 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2379 unsigned long start,
2380 unsigned long end,
2381 long adjust_next)
2382 {
2383 /*
2384 * If the new start address isn't hpage aligned and it could
2385 * previously contain a hugepage: check if we need to split
2386 * a huge pmd.
2387 */
2388 if (start & ~HPAGE_PMD_MASK &&
2389 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2390 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2391 split_huge_pmd_address(vma, start, false, NULL);
2392
2393 /*
2394 * If the new end address isn't hpage aligned and it could
2395 * previously contain a hugepage: check if we need to split
2396 * a huge pmd.
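 *
 * For example, shrinking a VMA (e.g. by unmapping its tail) so that the
 * new end falls in the middle of a mapped huge pmd requires splitting
 * that pmd first, so the range beyond the new end can be unmapped with
 * PAGE_SIZE granularity.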
2397 */
2398 if (end & ~HPAGE_PMD_MASK &&
2399 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2400 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2401 split_huge_pmd_address(vma, end, false, NULL);
2402
2403 /*
2404 * If we're also updating vma->vm_next->vm_start, and the new
2405 * vm_next->vm_start isn't page aligned and it could previously
2406 * contain a hugepage: check if we need to split a huge pmd.
2407 */
2408 if (adjust_next > 0) {
2409 struct vm_area_struct *next = vma->vm_next;
2410 unsigned long nstart = next->vm_start;
2411 nstart += adjust_next << PAGE_SHIFT;
2412 if (nstart & ~HPAGE_PMD_MASK &&
2413 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2414 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2415 split_huge_pmd_address(next, nstart, false, NULL);
2416 }
2417 }
2418
2419 static void unmap_page(struct page *page)
2420 {
2421 enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
2422 TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
2423 bool unmap_success;
2424
2425 VM_BUG_ON_PAGE(!PageHead(page), page);
2426
2427 if (PageAnon(page))
2428 ttu_flags |= TTU_SPLIT_FREEZE;
2429
2430 unmap_success = try_to_unmap(page, ttu_flags);
2431 VM_BUG_ON_PAGE(!unmap_success, page);
2432 }
2433
2434 static void remap_page(struct page *page)
2435 {
2436 int i;
2437 if (PageTransHuge(page)) {
2438 remove_migration_ptes(page, page, true);
2439 } else {
2440 for (i = 0; i < HPAGE_PMD_NR; i++)
2441 remove_migration_ptes(page + i, page + i, true);
2442 }
2443 }
2444
2445 static void __split_huge_page_tail(struct page *head, int tail,
2446 struct lruvec *lruvec, struct list_head *list)
2447 {
2448 struct page *page_tail = head + tail;
2449
2450 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2451
2452 /*
2453 * Clone page flags before unfreezing refcount.
2454 *
2455 * A successful get_page_unless_zero() might be followed by a flags
2456 * change, for example lock_page(), which sets PG_waiters.
2457 */
2458 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2459 page_tail->flags |= (head->flags &
2460 ((1L << PG_referenced) |
2461 (1L << PG_swapbacked) |
2462 (1L << PG_swapcache) |
2463 (1L << PG_mlocked) |
2464 (1L << PG_uptodate) |
2465 (1L << PG_active) |
2466 (1L << PG_workingset) |
2467 (1L << PG_locked) |
2468 (1L << PG_unevictable) |
2469 (1L << PG_dirty)));
2470
2471 /* ->mapping in first tail page is compound_mapcount */
2472 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2473 page_tail);
2474 page_tail->mapping = head->mapping;
2475 page_tail->index = head->index + tail;
2476
2477 /* Page flags must be visible before we make the page non-compound. */
2478 smp_wmb();
2479
2480 /*
2481 * Clear PageTail before unfreezing page refcount.
2482 *
2483 * A successful get_page_unless_zero() might be followed by put_page(),
2484 * which needs a correct compound_head().
2485 */
2486 clear_compound_head(page_tail);
2487
2488 /* Finally unfreeze refcount. Additional reference from page cache. */
2489 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2490 PageSwapCache(head)));
2491
2492 if (page_is_young(head))
2493 set_page_young(page_tail);
2494 if (page_is_idle(head))
2495 set_page_idle(page_tail);
2496
2497 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2498
2499 /*
2500 * Always add to the tail because some iterators expect new
2501 * pages to show up after the currently processed elements - e.g.
2502 * migrate_pages 2503 */ 2504 lru_add_page_tail(head, page_tail, lruvec, list); 2505 } 2506 2507 static void __split_huge_page(struct page *page, struct list_head *list, 2508 pgoff_t end, unsigned long flags) 2509 { 2510 struct page *head = compound_head(page); 2511 pg_data_t *pgdat = page_pgdat(head); 2512 struct lruvec *lruvec; 2513 struct address_space *swap_cache = NULL; 2514 unsigned long offset = 0; 2515 int i; 2516 2517 lruvec = mem_cgroup_page_lruvec(head, pgdat); 2518 2519 /* complete memcg works before add pages to LRU */ 2520 mem_cgroup_split_huge_fixup(head); 2521 2522 if (PageAnon(head) && PageSwapCache(head)) { 2523 swp_entry_t entry = { .val = page_private(head) }; 2524 2525 offset = swp_offset(entry); 2526 swap_cache = swap_address_space(entry); 2527 xa_lock(&swap_cache->i_pages); 2528 } 2529 2530 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 2531 __split_huge_page_tail(head, i, lruvec, list); 2532 /* Some pages can be beyond i_size: drop them from page cache */ 2533 if (head[i].index >= end) { 2534 ClearPageDirty(head + i); 2535 __delete_from_page_cache(head + i, NULL); 2536 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2537 shmem_uncharge(head->mapping->host, 1); 2538 put_page(head + i); 2539 } else if (!PageAnon(page)) { 2540 __xa_store(&head->mapping->i_pages, head[i].index, 2541 head + i, 0); 2542 } else if (swap_cache) { 2543 __xa_store(&swap_cache->i_pages, offset + i, 2544 head + i, 0); 2545 } 2546 } 2547 2548 ClearPageCompound(head); 2549 2550 split_page_owner(head, HPAGE_PMD_ORDER); 2551 2552 /* See comment in __split_huge_page_tail() */ 2553 if (PageAnon(head)) { 2554 /* Additional pin to swap cache */ 2555 if (PageSwapCache(head)) { 2556 page_ref_add(head, 2); 2557 xa_unlock(&swap_cache->i_pages); 2558 } else { 2559 page_ref_inc(head); 2560 } 2561 } else { 2562 /* Additional pin to page cache */ 2563 page_ref_add(head, 2); 2564 xa_unlock(&head->mapping->i_pages); 2565 } 2566 2567 spin_unlock_irqrestore(&pgdat->lru_lock, flags); 2568 2569 remap_page(head); 2570 2571 for (i = 0; i < HPAGE_PMD_NR; i++) { 2572 struct page *subpage = head + i; 2573 if (subpage == page) 2574 continue; 2575 unlock_page(subpage); 2576 2577 /* 2578 * Subpages may be freed if there wasn't any mapping 2579 * like if add_to_swap() is running on a lru page that 2580 * had its mapping zapped. And freeing these pages 2581 * requires taking the lru_lock so we do the put_page 2582 * of the tail pages after the split is complete. 2583 */ 2584 put_page(subpage); 2585 } 2586 } 2587 2588 int total_mapcount(struct page *page) 2589 { 2590 int i, compound, ret; 2591 2592 VM_BUG_ON_PAGE(PageTail(page), page); 2593 2594 if (likely(!PageCompound(page))) 2595 return atomic_read(&page->_mapcount) + 1; 2596 2597 compound = compound_mapcount(page); 2598 if (PageHuge(page)) 2599 return compound; 2600 ret = compound; 2601 for (i = 0; i < HPAGE_PMD_NR; i++) 2602 ret += atomic_read(&page[i]._mapcount) + 1; 2603 /* File pages has compound_mapcount included in _mapcount */ 2604 if (!PageAnon(page)) 2605 return ret - compound * HPAGE_PMD_NR; 2606 if (PageDoubleMap(page)) 2607 ret -= HPAGE_PMD_NR; 2608 return ret; 2609 } 2610 2611 /* 2612 * This calculates accurately how many mappings a transparent hugepage 2613 * has (unlike page_mapcount() which isn't fully accurate). This full 2614 * accuracy is primarily needed to know if copy-on-write faults can 2615 * reuse the page and change the mapping to read-write instead of 2616 * copying them. At the same time this returns the total_mapcount too. 
2617 * 2618 * The function returns the highest mapcount any one of the subpages 2619 * has. If the return value is one, even if different processes are 2620 * mapping different subpages of the transparent hugepage, they can 2621 * all reuse it, because each process is reusing a different subpage. 2622 * 2623 * The total_mapcount is instead counting all virtual mappings of the 2624 * subpages. If the total_mapcount is equal to "one", it tells the 2625 * caller all mappings belong to the same "mm" and in turn the 2626 * anon_vma of the transparent hugepage can become the vma->anon_vma 2627 * local one as no other process may be mapping any of the subpages. 2628 * 2629 * It would be more accurate to replace page_mapcount() with 2630 * page_trans_huge_mapcount(), however we only use 2631 * page_trans_huge_mapcount() in the copy-on-write faults where we 2632 * need full accuracy to avoid breaking page pinning, because 2633 * page_trans_huge_mapcount() is slower than page_mapcount(). 2634 */ 2635 int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 2636 { 2637 int i, ret, _total_mapcount, mapcount; 2638 2639 /* hugetlbfs shouldn't call it */ 2640 VM_BUG_ON_PAGE(PageHuge(page), page); 2641 2642 if (likely(!PageTransCompound(page))) { 2643 mapcount = atomic_read(&page->_mapcount) + 1; 2644 if (total_mapcount) 2645 *total_mapcount = mapcount; 2646 return mapcount; 2647 } 2648 2649 page = compound_head(page); 2650 2651 _total_mapcount = ret = 0; 2652 for (i = 0; i < HPAGE_PMD_NR; i++) { 2653 mapcount = atomic_read(&page[i]._mapcount) + 1; 2654 ret = max(ret, mapcount); 2655 _total_mapcount += mapcount; 2656 } 2657 if (PageDoubleMap(page)) { 2658 ret -= 1; 2659 _total_mapcount -= HPAGE_PMD_NR; 2660 } 2661 mapcount = compound_mapcount(page); 2662 ret += mapcount; 2663 _total_mapcount += mapcount; 2664 if (total_mapcount) 2665 *total_mapcount = _total_mapcount; 2666 return ret; 2667 } 2668 2669 /* Racy check whether the huge page can be split */ 2670 bool can_split_huge_page(struct page *page, int *pextra_pins) 2671 { 2672 int extra_pins; 2673 2674 /* Additional pins from page cache */ 2675 if (PageAnon(page)) 2676 extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; 2677 else 2678 extra_pins = HPAGE_PMD_NR; 2679 if (pextra_pins) 2680 *pextra_pins = extra_pins; 2681 return total_mapcount(page) == page_count(page) - extra_pins - 1; 2682 } 2683 2684 /* 2685 * This function splits huge page into normal pages. @page can point to any 2686 * subpage of huge page to split. Split doesn't change the position of @page. 2687 * 2688 * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. 2689 * The huge page must be locked. 2690 * 2691 * If @list is null, tail pages will be added to LRU list, otherwise, to @list. 2692 * 2693 * Both head page and tail pages will inherit mapping, flags, and so on from 2694 * the hugepage. 2695 * 2696 * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if 2697 * they are not mapped. 2698 * 2699 * Returns 0 if the hugepage is split successfully. 2700 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under 2701 * us. 
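 *
 * Typical usage is via the split_huge_page() wrapper, which passes a NULL
 * @list (see for instance deferred_split_scan() later in this file): the
 * caller pins and locks the head page, calls split_huge_page() (which
 * returns 0 on success) and unlocks it:
 *
 *	if (!trylock_page(page))
 *		goto next;
 *	if (!split_huge_page(page))
 *		split++;
 *	unlock_page(page);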
2702 */
2703 int split_huge_page_to_list(struct page *page, struct list_head *list)
2704 {
2705 struct page *head = compound_head(page);
2706 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2707 struct deferred_split *ds_queue = get_deferred_split_queue(page);
2708 struct anon_vma *anon_vma = NULL;
2709 struct address_space *mapping = NULL;
2710 int count, mapcount, extra_pins, ret;
2711 bool mlocked;
2712 unsigned long flags;
2713 pgoff_t end;
2714
2715 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2716 VM_BUG_ON_PAGE(!PageLocked(page), page);
2717 VM_BUG_ON_PAGE(!PageCompound(page), page);
2718
2719 if (PageWriteback(page))
2720 return -EBUSY;
2721
2722 if (PageAnon(head)) {
2723 /*
2724 * The caller does not necessarily hold an mmap_sem that would
2725 * prevent the anon_vma disappearing, so we first take a
2726 * reference to it and then lock the anon_vma for write. This
2727 * is similar to page_lock_anon_vma_read except the write lock
2728 * is taken to serialise against parallel split or collapse
2729 * operations.
2730 */
2731 anon_vma = page_get_anon_vma(head);
2732 if (!anon_vma) {
2733 ret = -EBUSY;
2734 goto out;
2735 }
2736 end = -1;
2737 mapping = NULL;
2738 anon_vma_lock_write(anon_vma);
2739 } else {
2740 mapping = head->mapping;
2741
2742 /* Truncated? */
2743 if (!mapping) {
2744 ret = -EBUSY;
2745 goto out;
2746 }
2747
2748 anon_vma = NULL;
2749 i_mmap_lock_read(mapping);
2750
2751 /*
2752 * __split_huge_page() may need to trim off pages beyond EOF:
2753 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2754 * which cannot be nested inside the page tree lock. So note
2755 * end now: i_size itself may be changed at any moment, but
2756 * head page lock is good enough to serialize the trimming.
2757 */
2758 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2759 }
2760
2761 /*
2762 * Racy check if we can split the page, before unmap_page() will
2763 * split PMDs
2764 */
2765 if (!can_split_huge_page(head, &extra_pins)) {
2766 ret = -EBUSY;
2767 goto out_unlock;
2768 }
2769
2770 mlocked = PageMlocked(page);
2771 unmap_page(head);
2772 VM_BUG_ON_PAGE(compound_mapcount(head), head);
2773
2774 /* Make sure the page is not on per-CPU pagevec as it takes pin */
2775 if (mlocked)
2776 lru_add_drain();
2777
2778 /* prevent PageLRU from going away from under us, and freeze lru stats */
2779 spin_lock_irqsave(&pgdata->lru_lock, flags);
2780
2781 if (mapping) {
2782 XA_STATE(xas, &mapping->i_pages, page_index(head));
2783
2784 /*
2785 * Check if the head page is present in page cache.
2786 * We assume all tails are present too, if the head is there.
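 * The xa_lock taken just below is then held while the tail pages are
 * split off: it is dropped in __split_huge_page() on success, or at the
 * fail: label if the head is no longer in the page cache or cannot be
 * frozen.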
2787 */ 2788 xa_lock(&mapping->i_pages); 2789 if (xas_load(&xas) != head) 2790 goto fail; 2791 } 2792 2793 /* Prevent deferred_split_scan() touching ->_refcount */ 2794 spin_lock(&ds_queue->split_queue_lock); 2795 count = page_count(head); 2796 mapcount = total_mapcount(head); 2797 if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { 2798 if (!list_empty(page_deferred_list(head))) { 2799 ds_queue->split_queue_len--; 2800 list_del(page_deferred_list(head)); 2801 } 2802 if (mapping) { 2803 if (PageSwapBacked(page)) 2804 __dec_node_page_state(page, NR_SHMEM_THPS); 2805 else 2806 __dec_node_page_state(page, NR_FILE_THPS); 2807 } 2808 2809 spin_unlock(&ds_queue->split_queue_lock); 2810 __split_huge_page(page, list, end, flags); 2811 if (PageSwapCache(head)) { 2812 swp_entry_t entry = { .val = page_private(head) }; 2813 2814 ret = split_swap_cluster(entry); 2815 } else 2816 ret = 0; 2817 } else { 2818 if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { 2819 pr_alert("total_mapcount: %u, page_count(): %u\n", 2820 mapcount, count); 2821 if (PageTail(page)) 2822 dump_page(head, NULL); 2823 dump_page(page, "total_mapcount(head) > 0"); 2824 BUG(); 2825 } 2826 spin_unlock(&ds_queue->split_queue_lock); 2827 fail: if (mapping) 2828 xa_unlock(&mapping->i_pages); 2829 spin_unlock_irqrestore(&pgdata->lru_lock, flags); 2830 remap_page(head); 2831 ret = -EBUSY; 2832 } 2833 2834 out_unlock: 2835 if (anon_vma) { 2836 anon_vma_unlock_write(anon_vma); 2837 put_anon_vma(anon_vma); 2838 } 2839 if (mapping) 2840 i_mmap_unlock_read(mapping); 2841 out: 2842 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); 2843 return ret; 2844 } 2845 2846 void free_transhuge_page(struct page *page) 2847 { 2848 struct deferred_split *ds_queue = get_deferred_split_queue(page); 2849 unsigned long flags; 2850 2851 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2852 if (!list_empty(page_deferred_list(page))) { 2853 ds_queue->split_queue_len--; 2854 list_del(page_deferred_list(page)); 2855 } 2856 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 2857 free_compound_page(page); 2858 } 2859 2860 void deferred_split_huge_page(struct page *page) 2861 { 2862 struct deferred_split *ds_queue = get_deferred_split_queue(page); 2863 #ifdef CONFIG_MEMCG 2864 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; 2865 #endif 2866 unsigned long flags; 2867 2868 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 2869 2870 /* 2871 * The try_to_unmap() in page reclaim path might reach here too, 2872 * this may cause a race condition to corrupt deferred split queue. 2873 * And, if page reclaim is already handling the same page, it is 2874 * unnecessary to handle it again in shrinker. 2875 * 2876 * Check PageSwapCache to determine if the page is being 2877 * handled by page reclaim since THP swap would add the page into 2878 * swap cache before calling try_to_unmap(). 
2879 */ 2880 if (PageSwapCache(page)) 2881 return; 2882 2883 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2884 if (list_empty(page_deferred_list(page))) { 2885 count_vm_event(THP_DEFERRED_SPLIT_PAGE); 2886 list_add_tail(page_deferred_list(page), &ds_queue->split_queue); 2887 ds_queue->split_queue_len++; 2888 #ifdef CONFIG_MEMCG 2889 if (memcg) 2890 memcg_set_shrinker_bit(memcg, page_to_nid(page), 2891 deferred_split_shrinker.id); 2892 #endif 2893 } 2894 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 2895 } 2896 2897 static unsigned long deferred_split_count(struct shrinker *shrink, 2898 struct shrink_control *sc) 2899 { 2900 struct pglist_data *pgdata = NODE_DATA(sc->nid); 2901 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 2902 2903 #ifdef CONFIG_MEMCG 2904 if (sc->memcg) 2905 ds_queue = &sc->memcg->deferred_split_queue; 2906 #endif 2907 return READ_ONCE(ds_queue->split_queue_len); 2908 } 2909 2910 static unsigned long deferred_split_scan(struct shrinker *shrink, 2911 struct shrink_control *sc) 2912 { 2913 struct pglist_data *pgdata = NODE_DATA(sc->nid); 2914 struct deferred_split *ds_queue = &pgdata->deferred_split_queue; 2915 unsigned long flags; 2916 LIST_HEAD(list), *pos, *next; 2917 struct page *page; 2918 int split = 0; 2919 2920 #ifdef CONFIG_MEMCG 2921 if (sc->memcg) 2922 ds_queue = &sc->memcg->deferred_split_queue; 2923 #endif 2924 2925 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2926 /* Take pin on all head pages to avoid freeing them under us */ 2927 list_for_each_safe(pos, next, &ds_queue->split_queue) { 2928 page = list_entry((void *)pos, struct page, mapping); 2929 page = compound_head(page); 2930 if (get_page_unless_zero(page)) { 2931 list_move(page_deferred_list(page), &list); 2932 } else { 2933 /* We lost race with put_compound_page() */ 2934 list_del_init(page_deferred_list(page)); 2935 ds_queue->split_queue_len--; 2936 } 2937 if (!--sc->nr_to_scan) 2938 break; 2939 } 2940 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 2941 2942 list_for_each_safe(pos, next, &list) { 2943 page = list_entry((void *)pos, struct page, mapping); 2944 if (!trylock_page(page)) 2945 goto next; 2946 /* split_huge_page() removes page from list on success */ 2947 if (!split_huge_page(page)) 2948 split++; 2949 unlock_page(page); 2950 next: 2951 put_page(page); 2952 } 2953 2954 spin_lock_irqsave(&ds_queue->split_queue_lock, flags); 2955 list_splice_tail(&list, &ds_queue->split_queue); 2956 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); 2957 2958 /* 2959 * Stop shrinker if we didn't split any page, but the queue is empty. 2960 * This can happen if pages were freed under us. 
2961 */ 2962 if (!split && list_empty(&ds_queue->split_queue)) 2963 return SHRINK_STOP; 2964 return split; 2965 } 2966 2967 static struct shrinker deferred_split_shrinker = { 2968 .count_objects = deferred_split_count, 2969 .scan_objects = deferred_split_scan, 2970 .seeks = DEFAULT_SEEKS, 2971 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | 2972 SHRINKER_NONSLAB, 2973 }; 2974 2975 #ifdef CONFIG_DEBUG_FS 2976 static int split_huge_pages_set(void *data, u64 val) 2977 { 2978 struct zone *zone; 2979 struct page *page; 2980 unsigned long pfn, max_zone_pfn; 2981 unsigned long total = 0, split = 0; 2982 2983 if (val != 1) 2984 return -EINVAL; 2985 2986 for_each_populated_zone(zone) { 2987 max_zone_pfn = zone_end_pfn(zone); 2988 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { 2989 if (!pfn_valid(pfn)) 2990 continue; 2991 2992 page = pfn_to_page(pfn); 2993 if (!get_page_unless_zero(page)) 2994 continue; 2995 2996 if (zone != page_zone(page)) 2997 goto next; 2998 2999 if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) 3000 goto next; 3001 3002 total++; 3003 lock_page(page); 3004 if (!split_huge_page(page)) 3005 split++; 3006 unlock_page(page); 3007 next: 3008 put_page(page); 3009 } 3010 } 3011 3012 pr_info("%lu of %lu THP split\n", split, total); 3013 3014 return 0; 3015 } 3016 DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, 3017 "%llu\n"); 3018 3019 static int __init split_huge_pages_debugfs(void) 3020 { 3021 debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 3022 &split_huge_pages_fops); 3023 return 0; 3024 } 3025 late_initcall(split_huge_pages_debugfs); 3026 #endif 3027 3028 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 3029 void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, 3030 struct page *page) 3031 { 3032 struct vm_area_struct *vma = pvmw->vma; 3033 struct mm_struct *mm = vma->vm_mm; 3034 unsigned long address = pvmw->address; 3035 pmd_t pmdval; 3036 swp_entry_t entry; 3037 pmd_t pmdswp; 3038 3039 if (!(pvmw->pmd && !pvmw->pte)) 3040 return; 3041 3042 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); 3043 pmdval = *pvmw->pmd; 3044 pmdp_invalidate(vma, address, pvmw->pmd); 3045 if (pmd_dirty(pmdval)) 3046 set_page_dirty(page); 3047 entry = make_migration_entry(page, pmd_write(pmdval)); 3048 pmdswp = swp_entry_to_pmd(entry); 3049 if (pmd_soft_dirty(pmdval)) 3050 pmdswp = pmd_swp_mksoft_dirty(pmdswp); 3051 set_pmd_at(mm, address, pvmw->pmd, pmdswp); 3052 page_remove_rmap(page, true); 3053 put_page(page); 3054 } 3055 3056 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) 3057 { 3058 struct vm_area_struct *vma = pvmw->vma; 3059 struct mm_struct *mm = vma->vm_mm; 3060 unsigned long address = pvmw->address; 3061 unsigned long mmun_start = address & HPAGE_PMD_MASK; 3062 pmd_t pmde; 3063 swp_entry_t entry; 3064 3065 if (!(pvmw->pmd && !pvmw->pte)) 3066 return; 3067 3068 entry = pmd_to_swp_entry(*pvmw->pmd); 3069 get_page(new); 3070 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); 3071 if (pmd_swp_soft_dirty(*pvmw->pmd)) 3072 pmde = pmd_mksoft_dirty(pmde); 3073 if (is_write_migration_entry(entry)) 3074 pmde = maybe_pmd_mkwrite(pmde, vma); 3075 3076 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 3077 if (PageAnon(new)) 3078 page_add_anon_rmap(new, vma, mmun_start, true); 3079 else 3080 page_add_file_rmap(new, true); 3081 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 3082 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) 3083 mlock_vma_page(new); 3084 
update_mmu_cache_pmd(vma, address, pvmw->pmd); 3085 } 3086 #endif 3087