1 /* 2 * Copyright (C) 2009 Red Hat, Inc. 3 * 4 * This work is licensed under the terms of the GNU GPL, version 2. See 5 * the COPYING file in the top-level directory. 6 */ 7 8 #include <linux/mm.h> 9 #include <linux/sched.h> 10 #include <linux/highmem.h> 11 #include <linux/hugetlb.h> 12 #include <linux/mmu_notifier.h> 13 #include <linux/rmap.h> 14 #include <linux/swap.h> 15 #include <linux/shrinker.h> 16 #include <linux/mm_inline.h> 17 #include <linux/kthread.h> 18 #include <linux/khugepaged.h> 19 #include <linux/freezer.h> 20 #include <linux/mman.h> 21 #include <linux/pagemap.h> 22 #include <linux/migrate.h> 23 #include <linux/hashtable.h> 24 25 #include <asm/tlb.h> 26 #include <asm/pgalloc.h> 27 #include "internal.h" 28 29 /* 30 * By default transparent hugepage support is enabled for all mappings 31 * and khugepaged scans all mappings. Defrag is only invoked by 32 * khugepaged hugepage allocations and by page faults inside 33 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived 34 * allocations. 35 */ 36 unsigned long transparent_hugepage_flags __read_mostly = 37 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS 38 (1<<TRANSPARENT_HUGEPAGE_FLAG)| 39 #endif 40 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE 41 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| 42 #endif 43 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)| 44 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| 45 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 46 47 /* default scan 8*512 pte (or vmas) every 30 second */ 48 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8; 49 static unsigned int khugepaged_pages_collapsed; 50 static unsigned int khugepaged_full_scans; 51 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; 52 /* during fragmentation poll the hugepage allocator once every minute */ 53 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; 54 static struct task_struct *khugepaged_thread __read_mostly; 55 static DEFINE_MUTEX(khugepaged_mutex); 56 static DEFINE_SPINLOCK(khugepaged_mm_lock); 57 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); 58 /* 59 * default collapse hugepages if there is at least one pte mapped like 60 * it would have happened if the vma was large enough during page 61 * fault. 62 */ 63 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1; 64 65 static int khugepaged(void *none); 66 static int khugepaged_slab_init(void); 67 68 #define MM_SLOTS_HASH_BITS 10 69 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); 70 71 static struct kmem_cache *mm_slot_cache __read_mostly; 72 73 /** 74 * struct mm_slot - hash lookup from mm to mm_slot 75 * @hash: hash collision list 76 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head 77 * @mm: the mm that this information is valid for 78 */ 79 struct mm_slot { 80 struct hlist_node hash; 81 struct list_head mm_node; 82 struct mm_struct *mm; 83 }; 84 85 /** 86 * struct khugepaged_scan - cursor for scanning 87 * @mm_head: the head of the mm list to scan 88 * @mm_slot: the current mm_slot we are scanning 89 * @address: the next address inside that to be scanned 90 * 91 * There is only the one khugepaged_scan instance of this cursor structure. 
92 */ 93 struct khugepaged_scan { 94 struct list_head mm_head; 95 struct mm_slot *mm_slot; 96 unsigned long address; 97 }; 98 static struct khugepaged_scan khugepaged_scan = { 99 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 100 }; 101 102 103 static int set_recommended_min_free_kbytes(void) 104 { 105 struct zone *zone; 106 int nr_zones = 0; 107 unsigned long recommended_min; 108 109 if (!khugepaged_enabled()) 110 return 0; 111 112 for_each_populated_zone(zone) 113 nr_zones++; 114 115 /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */ 116 recommended_min = pageblock_nr_pages * nr_zones * 2; 117 118 /* 119 * Make sure that on average at least two pageblocks are almost free 120 * of another type, one for a migratetype to fall back to and a 121 * second to avoid subsequent fallbacks of other types There are 3 122 * MIGRATE_TYPES we care about. 123 */ 124 recommended_min += pageblock_nr_pages * nr_zones * 125 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 126 127 /* don't ever allow to reserve more than 5% of the lowmem */ 128 recommended_min = min(recommended_min, 129 (unsigned long) nr_free_buffer_pages() / 20); 130 recommended_min <<= (PAGE_SHIFT-10); 131 132 if (recommended_min > min_free_kbytes) 133 min_free_kbytes = recommended_min; 134 setup_per_zone_wmarks(); 135 return 0; 136 } 137 late_initcall(set_recommended_min_free_kbytes); 138 139 static int start_khugepaged(void) 140 { 141 int err = 0; 142 if (khugepaged_enabled()) { 143 if (!khugepaged_thread) 144 khugepaged_thread = kthread_run(khugepaged, NULL, 145 "khugepaged"); 146 if (unlikely(IS_ERR(khugepaged_thread))) { 147 printk(KERN_ERR 148 "khugepaged: kthread_run(khugepaged) failed\n"); 149 err = PTR_ERR(khugepaged_thread); 150 khugepaged_thread = NULL; 151 } 152 153 if (!list_empty(&khugepaged_scan.mm_head)) 154 wake_up_interruptible(&khugepaged_wait); 155 156 set_recommended_min_free_kbytes(); 157 } else if (khugepaged_thread) { 158 kthread_stop(khugepaged_thread); 159 khugepaged_thread = NULL; 160 } 161 162 return err; 163 } 164 165 static atomic_t huge_zero_refcount; 166 static unsigned long huge_zero_pfn __read_mostly; 167 168 static inline bool is_huge_zero_pfn(unsigned long pfn) 169 { 170 unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn); 171 return zero_pfn && pfn == zero_pfn; 172 } 173 174 static inline bool is_huge_zero_pmd(pmd_t pmd) 175 { 176 return is_huge_zero_pfn(pmd_pfn(pmd)); 177 } 178 179 static unsigned long get_huge_zero_page(void) 180 { 181 struct page *zero_page; 182 retry: 183 if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 184 return ACCESS_ONCE(huge_zero_pfn); 185 186 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 187 HPAGE_PMD_ORDER); 188 if (!zero_page) { 189 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 190 return 0; 191 } 192 count_vm_event(THP_ZERO_PAGE_ALLOC); 193 preempt_disable(); 194 if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) { 195 preempt_enable(); 196 __free_page(zero_page); 197 goto retry; 198 } 199 200 /* We take additional reference here. It will be put back by shrinker */ 201 atomic_set(&huge_zero_refcount, 2); 202 preempt_enable(); 203 return ACCESS_ONCE(huge_zero_pfn); 204 } 205 206 static void put_huge_zero_page(void) 207 { 208 /* 209 * Counter should never go to zero here. Only shrinker can put 210 * last reference. 
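	 *
	 * Illustrative pairing only, a sketch of how the callers below use
	 * this counter rather than an additional rule:
	 *
	 *	zero_pfn = get_huge_zero_page();   refcount becomes >= 2
	 *	... install zero_pfn in a huge pmd ...
	 *	put_huge_zero_page();              never the final put
	 *
	 * Only shrink_huge_zero_page() may drop the count from 1 to 0 and
	 * free the zero page.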
211 */ 212 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 213 } 214 215 static int shrink_huge_zero_page(struct shrinker *shrink, 216 struct shrink_control *sc) 217 { 218 if (!sc->nr_to_scan) 219 /* we can free zero page only if last reference remains */ 220 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 221 222 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 223 unsigned long zero_pfn = xchg(&huge_zero_pfn, 0); 224 BUG_ON(zero_pfn == 0); 225 __free_page(__pfn_to_page(zero_pfn)); 226 } 227 228 return 0; 229 } 230 231 static struct shrinker huge_zero_page_shrinker = { 232 .shrink = shrink_huge_zero_page, 233 .seeks = DEFAULT_SEEKS, 234 }; 235 236 #ifdef CONFIG_SYSFS 237 238 static ssize_t double_flag_show(struct kobject *kobj, 239 struct kobj_attribute *attr, char *buf, 240 enum transparent_hugepage_flag enabled, 241 enum transparent_hugepage_flag req_madv) 242 { 243 if (test_bit(enabled, &transparent_hugepage_flags)) { 244 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); 245 return sprintf(buf, "[always] madvise never\n"); 246 } else if (test_bit(req_madv, &transparent_hugepage_flags)) 247 return sprintf(buf, "always [madvise] never\n"); 248 else 249 return sprintf(buf, "always madvise [never]\n"); 250 } 251 static ssize_t double_flag_store(struct kobject *kobj, 252 struct kobj_attribute *attr, 253 const char *buf, size_t count, 254 enum transparent_hugepage_flag enabled, 255 enum transparent_hugepage_flag req_madv) 256 { 257 if (!memcmp("always", buf, 258 min(sizeof("always")-1, count))) { 259 set_bit(enabled, &transparent_hugepage_flags); 260 clear_bit(req_madv, &transparent_hugepage_flags); 261 } else if (!memcmp("madvise", buf, 262 min(sizeof("madvise")-1, count))) { 263 clear_bit(enabled, &transparent_hugepage_flags); 264 set_bit(req_madv, &transparent_hugepage_flags); 265 } else if (!memcmp("never", buf, 266 min(sizeof("never")-1, count))) { 267 clear_bit(enabled, &transparent_hugepage_flags); 268 clear_bit(req_madv, &transparent_hugepage_flags); 269 } else 270 return -EINVAL; 271 272 return count; 273 } 274 275 static ssize_t enabled_show(struct kobject *kobj, 276 struct kobj_attribute *attr, char *buf) 277 { 278 return double_flag_show(kobj, attr, buf, 279 TRANSPARENT_HUGEPAGE_FLAG, 280 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 281 } 282 static ssize_t enabled_store(struct kobject *kobj, 283 struct kobj_attribute *attr, 284 const char *buf, size_t count) 285 { 286 ssize_t ret; 287 288 ret = double_flag_store(kobj, attr, buf, count, 289 TRANSPARENT_HUGEPAGE_FLAG, 290 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 291 292 if (ret > 0) { 293 int err; 294 295 mutex_lock(&khugepaged_mutex); 296 err = start_khugepaged(); 297 mutex_unlock(&khugepaged_mutex); 298 299 if (err) 300 ret = err; 301 } 302 303 return ret; 304 } 305 static struct kobj_attribute enabled_attr = 306 __ATTR(enabled, 0644, enabled_show, enabled_store); 307 308 static ssize_t single_flag_show(struct kobject *kobj, 309 struct kobj_attribute *attr, char *buf, 310 enum transparent_hugepage_flag flag) 311 { 312 return sprintf(buf, "%d\n", 313 !!test_bit(flag, &transparent_hugepage_flags)); 314 } 315 316 static ssize_t single_flag_store(struct kobject *kobj, 317 struct kobj_attribute *attr, 318 const char *buf, size_t count, 319 enum transparent_hugepage_flag flag) 320 { 321 unsigned long value; 322 int ret; 323 324 ret = kstrtoul(buf, 10, &value); 325 if (ret < 0) 326 return ret; 327 if (value > 1) 328 return -EINVAL; 329 330 if (value) 331 set_bit(flag, &transparent_hugepage_flags); 332 else 333 
clear_bit(flag, &transparent_hugepage_flags); 334 335 return count; 336 } 337 338 /* 339 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind 340 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of 341 * memory just to allocate one more hugepage. 342 */ 343 static ssize_t defrag_show(struct kobject *kobj, 344 struct kobj_attribute *attr, char *buf) 345 { 346 return double_flag_show(kobj, attr, buf, 347 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 348 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 349 } 350 static ssize_t defrag_store(struct kobject *kobj, 351 struct kobj_attribute *attr, 352 const char *buf, size_t count) 353 { 354 return double_flag_store(kobj, attr, buf, count, 355 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 356 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 357 } 358 static struct kobj_attribute defrag_attr = 359 __ATTR(defrag, 0644, defrag_show, defrag_store); 360 361 static ssize_t use_zero_page_show(struct kobject *kobj, 362 struct kobj_attribute *attr, char *buf) 363 { 364 return single_flag_show(kobj, attr, buf, 365 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 366 } 367 static ssize_t use_zero_page_store(struct kobject *kobj, 368 struct kobj_attribute *attr, const char *buf, size_t count) 369 { 370 return single_flag_store(kobj, attr, buf, count, 371 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 372 } 373 static struct kobj_attribute use_zero_page_attr = 374 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 375 #ifdef CONFIG_DEBUG_VM 376 static ssize_t debug_cow_show(struct kobject *kobj, 377 struct kobj_attribute *attr, char *buf) 378 { 379 return single_flag_show(kobj, attr, buf, 380 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 381 } 382 static ssize_t debug_cow_store(struct kobject *kobj, 383 struct kobj_attribute *attr, 384 const char *buf, size_t count) 385 { 386 return single_flag_store(kobj, attr, buf, count, 387 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 388 } 389 static struct kobj_attribute debug_cow_attr = 390 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 391 #endif /* CONFIG_DEBUG_VM */ 392 393 static struct attribute *hugepage_attr[] = { 394 &enabled_attr.attr, 395 &defrag_attr.attr, 396 &use_zero_page_attr.attr, 397 #ifdef CONFIG_DEBUG_VM 398 &debug_cow_attr.attr, 399 #endif 400 NULL, 401 }; 402 403 static struct attribute_group hugepage_attr_group = { 404 .attrs = hugepage_attr, 405 }; 406 407 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 408 struct kobj_attribute *attr, 409 char *buf) 410 { 411 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); 412 } 413 414 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, 415 struct kobj_attribute *attr, 416 const char *buf, size_t count) 417 { 418 unsigned long msecs; 419 int err; 420 421 err = strict_strtoul(buf, 10, &msecs); 422 if (err || msecs > UINT_MAX) 423 return -EINVAL; 424 425 khugepaged_scan_sleep_millisecs = msecs; 426 wake_up_interruptible(&khugepaged_wait); 427 428 return count; 429 } 430 static struct kobj_attribute scan_sleep_millisecs_attr = 431 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, 432 scan_sleep_millisecs_store); 433 434 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, 435 struct kobj_attribute *attr, 436 char *buf) 437 { 438 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); 439 } 440 441 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, 442 struct kobj_attribute *attr, 443 const char *buf, size_t count) 444 { 445 unsigned long msecs; 446 int err; 447 448 err = 
strict_strtoul(buf, 10, &msecs); 449 if (err || msecs > UINT_MAX) 450 return -EINVAL; 451 452 khugepaged_alloc_sleep_millisecs = msecs; 453 wake_up_interruptible(&khugepaged_wait); 454 455 return count; 456 } 457 static struct kobj_attribute alloc_sleep_millisecs_attr = 458 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, 459 alloc_sleep_millisecs_store); 460 461 static ssize_t pages_to_scan_show(struct kobject *kobj, 462 struct kobj_attribute *attr, 463 char *buf) 464 { 465 return sprintf(buf, "%u\n", khugepaged_pages_to_scan); 466 } 467 static ssize_t pages_to_scan_store(struct kobject *kobj, 468 struct kobj_attribute *attr, 469 const char *buf, size_t count) 470 { 471 int err; 472 unsigned long pages; 473 474 err = strict_strtoul(buf, 10, &pages); 475 if (err || !pages || pages > UINT_MAX) 476 return -EINVAL; 477 478 khugepaged_pages_to_scan = pages; 479 480 return count; 481 } 482 static struct kobj_attribute pages_to_scan_attr = 483 __ATTR(pages_to_scan, 0644, pages_to_scan_show, 484 pages_to_scan_store); 485 486 static ssize_t pages_collapsed_show(struct kobject *kobj, 487 struct kobj_attribute *attr, 488 char *buf) 489 { 490 return sprintf(buf, "%u\n", khugepaged_pages_collapsed); 491 } 492 static struct kobj_attribute pages_collapsed_attr = 493 __ATTR_RO(pages_collapsed); 494 495 static ssize_t full_scans_show(struct kobject *kobj, 496 struct kobj_attribute *attr, 497 char *buf) 498 { 499 return sprintf(buf, "%u\n", khugepaged_full_scans); 500 } 501 static struct kobj_attribute full_scans_attr = 502 __ATTR_RO(full_scans); 503 504 static ssize_t khugepaged_defrag_show(struct kobject *kobj, 505 struct kobj_attribute *attr, char *buf) 506 { 507 return single_flag_show(kobj, attr, buf, 508 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 509 } 510 static ssize_t khugepaged_defrag_store(struct kobject *kobj, 511 struct kobj_attribute *attr, 512 const char *buf, size_t count) 513 { 514 return single_flag_store(kobj, attr, buf, count, 515 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 516 } 517 static struct kobj_attribute khugepaged_defrag_attr = 518 __ATTR(defrag, 0644, khugepaged_defrag_show, 519 khugepaged_defrag_store); 520 521 /* 522 * max_ptes_none controls if khugepaged should collapse hugepages over 523 * any unmapped ptes in turn potentially increasing the memory 524 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not 525 * reduce the available free memory in the system as it 526 * runs. Increasing max_ptes_none will instead potentially reduce the 527 * free memory in the system during the khugepaged scan. 
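 *
 * A worked example, assuming the common x86-64 case of 2MB transparent
 * hugepages so that HPAGE_PMD_NR == 512: with the default of
 * HPAGE_PMD_NR-1 == 511, a range with a single mapped pte may still be
 * collapsed, charging up to 511 * 4KB of previously unused memory to the
 * mm; with max_ptes_none set to 0, all 512 ptes must already be mapped
 * before khugepaged will collapse the range.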
528 */ 529 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, 530 struct kobj_attribute *attr, 531 char *buf) 532 { 533 return sprintf(buf, "%u\n", khugepaged_max_ptes_none); 534 } 535 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, 536 struct kobj_attribute *attr, 537 const char *buf, size_t count) 538 { 539 int err; 540 unsigned long max_ptes_none; 541 542 err = strict_strtoul(buf, 10, &max_ptes_none); 543 if (err || max_ptes_none > HPAGE_PMD_NR-1) 544 return -EINVAL; 545 546 khugepaged_max_ptes_none = max_ptes_none; 547 548 return count; 549 } 550 static struct kobj_attribute khugepaged_max_ptes_none_attr = 551 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, 552 khugepaged_max_ptes_none_store); 553 554 static struct attribute *khugepaged_attr[] = { 555 &khugepaged_defrag_attr.attr, 556 &khugepaged_max_ptes_none_attr.attr, 557 &pages_to_scan_attr.attr, 558 &pages_collapsed_attr.attr, 559 &full_scans_attr.attr, 560 &scan_sleep_millisecs_attr.attr, 561 &alloc_sleep_millisecs_attr.attr, 562 NULL, 563 }; 564 565 static struct attribute_group khugepaged_attr_group = { 566 .attrs = khugepaged_attr, 567 .name = "khugepaged", 568 }; 569 570 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 571 { 572 int err; 573 574 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 575 if (unlikely(!*hugepage_kobj)) { 576 printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n"); 577 return -ENOMEM; 578 } 579 580 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 581 if (err) { 582 printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n"); 583 goto delete_obj; 584 } 585 586 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 587 if (err) { 588 printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n"); 589 goto remove_hp_group; 590 } 591 592 return 0; 593 594 remove_hp_group: 595 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 596 delete_obj: 597 kobject_put(*hugepage_kobj); 598 return err; 599 } 600 601 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 602 { 603 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 604 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 605 kobject_put(hugepage_kobj); 606 } 607 #else 608 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 609 { 610 return 0; 611 } 612 613 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 614 { 615 } 616 #endif /* CONFIG_SYSFS */ 617 618 static int __init hugepage_init(void) 619 { 620 int err; 621 struct kobject *hugepage_kobj; 622 623 if (!has_transparent_hugepage()) { 624 transparent_hugepage_flags = 0; 625 return -EINVAL; 626 } 627 628 err = hugepage_init_sysfs(&hugepage_kobj); 629 if (err) 630 return err; 631 632 err = khugepaged_slab_init(); 633 if (err) 634 goto out; 635 636 register_shrinker(&huge_zero_page_shrinker); 637 638 /* 639 * By default disable transparent hugepages on smaller systems, 640 * where the extra memory used could hurt more than TLB overhead 641 * is likely to save. The admin can still enable it through /sys. 
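	 *
	 * The cutoff below is 512MB expressed in pages:
	 * 512 << (20 - PAGE_SHIFT) == (512 * 1024 * 1024) / PAGE_SIZE,
	 * i.e. 131072 pages with 4KB pages.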
642 */ 643 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) 644 transparent_hugepage_flags = 0; 645 646 start_khugepaged(); 647 648 return 0; 649 out: 650 hugepage_exit_sysfs(hugepage_kobj); 651 return err; 652 } 653 module_init(hugepage_init) 654 655 static int __init setup_transparent_hugepage(char *str) 656 { 657 int ret = 0; 658 if (!str) 659 goto out; 660 if (!strcmp(str, "always")) { 661 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 662 &transparent_hugepage_flags); 663 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 664 &transparent_hugepage_flags); 665 ret = 1; 666 } else if (!strcmp(str, "madvise")) { 667 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 668 &transparent_hugepage_flags); 669 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 670 &transparent_hugepage_flags); 671 ret = 1; 672 } else if (!strcmp(str, "never")) { 673 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 674 &transparent_hugepage_flags); 675 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 676 &transparent_hugepage_flags); 677 ret = 1; 678 } 679 out: 680 if (!ret) 681 printk(KERN_WARNING 682 "transparent_hugepage= cannot parse, ignored\n"); 683 return ret; 684 } 685 __setup("transparent_hugepage=", setup_transparent_hugepage); 686 687 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 688 { 689 if (likely(vma->vm_flags & VM_WRITE)) 690 pmd = pmd_mkwrite(pmd); 691 return pmd; 692 } 693 694 static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma) 695 { 696 pmd_t entry; 697 entry = mk_pmd(page, vma->vm_page_prot); 698 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 699 entry = pmd_mkhuge(entry); 700 return entry; 701 } 702 703 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, 704 struct vm_area_struct *vma, 705 unsigned long haddr, pmd_t *pmd, 706 struct page *page) 707 { 708 pgtable_t pgtable; 709 710 VM_BUG_ON(!PageCompound(page)); 711 pgtable = pte_alloc_one(mm, haddr); 712 if (unlikely(!pgtable)) 713 return VM_FAULT_OOM; 714 715 clear_huge_page(page, haddr, HPAGE_PMD_NR); 716 __SetPageUptodate(page); 717 718 spin_lock(&mm->page_table_lock); 719 if (unlikely(!pmd_none(*pmd))) { 720 spin_unlock(&mm->page_table_lock); 721 mem_cgroup_uncharge_page(page); 722 put_page(page); 723 pte_free(mm, pgtable); 724 } else { 725 pmd_t entry; 726 entry = mk_huge_pmd(page, vma); 727 /* 728 * The spinlocking to take the lru_lock inside 729 * page_add_new_anon_rmap() acts as a full memory 730 * barrier to be sure clear_huge_page writes become 731 * visible after the set_pmd_at() write. 732 */ 733 page_add_new_anon_rmap(page, vma, haddr); 734 set_pmd_at(mm, haddr, pmd, entry); 735 pgtable_trans_huge_deposit(mm, pgtable); 736 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 737 mm->nr_ptes++; 738 spin_unlock(&mm->page_table_lock); 739 } 740 741 return 0; 742 } 743 744 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) 745 { 746 return (GFP_TRANSHUGE & ~(defrag ? 
0 : __GFP_WAIT)) | extra_gfp; 747 } 748 749 static inline struct page *alloc_hugepage_vma(int defrag, 750 struct vm_area_struct *vma, 751 unsigned long haddr, int nd, 752 gfp_t extra_gfp) 753 { 754 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp), 755 HPAGE_PMD_ORDER, vma, haddr, nd); 756 } 757 758 #ifndef CONFIG_NUMA 759 static inline struct page *alloc_hugepage(int defrag) 760 { 761 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), 762 HPAGE_PMD_ORDER); 763 } 764 #endif 765 766 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 767 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 768 unsigned long zero_pfn) 769 { 770 pmd_t entry; 771 if (!pmd_none(*pmd)) 772 return false; 773 entry = pfn_pmd(zero_pfn, vma->vm_page_prot); 774 entry = pmd_wrprotect(entry); 775 entry = pmd_mkhuge(entry); 776 set_pmd_at(mm, haddr, pmd, entry); 777 pgtable_trans_huge_deposit(mm, pgtable); 778 mm->nr_ptes++; 779 return true; 780 } 781 782 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 783 unsigned long address, pmd_t *pmd, 784 unsigned int flags) 785 { 786 struct page *page; 787 unsigned long haddr = address & HPAGE_PMD_MASK; 788 pte_t *pte; 789 790 if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { 791 if (unlikely(anon_vma_prepare(vma))) 792 return VM_FAULT_OOM; 793 if (unlikely(khugepaged_enter(vma))) 794 return VM_FAULT_OOM; 795 if (!(flags & FAULT_FLAG_WRITE) && 796 transparent_hugepage_use_zero_page()) { 797 pgtable_t pgtable; 798 unsigned long zero_pfn; 799 bool set; 800 pgtable = pte_alloc_one(mm, haddr); 801 if (unlikely(!pgtable)) 802 return VM_FAULT_OOM; 803 zero_pfn = get_huge_zero_page(); 804 if (unlikely(!zero_pfn)) { 805 pte_free(mm, pgtable); 806 count_vm_event(THP_FAULT_FALLBACK); 807 goto out; 808 } 809 spin_lock(&mm->page_table_lock); 810 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, 811 zero_pfn); 812 spin_unlock(&mm->page_table_lock); 813 if (!set) { 814 pte_free(mm, pgtable); 815 put_huge_zero_page(); 816 } 817 return 0; 818 } 819 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 820 vma, haddr, numa_node_id(), 0); 821 if (unlikely(!page)) { 822 count_vm_event(THP_FAULT_FALLBACK); 823 goto out; 824 } 825 count_vm_event(THP_FAULT_ALLOC); 826 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 827 put_page(page); 828 goto out; 829 } 830 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, 831 page))) { 832 mem_cgroup_uncharge_page(page); 833 put_page(page); 834 goto out; 835 } 836 837 return 0; 838 } 839 out: 840 /* 841 * Use __pte_alloc instead of pte_alloc_map, because we can't 842 * run pte_offset_map on the pmd, if an huge pmd could 843 * materialize from under us from a different thread. 844 */ 845 if (unlikely(pmd_none(*pmd)) && 846 unlikely(__pte_alloc(mm, vma, pmd, address))) 847 return VM_FAULT_OOM; 848 /* if an huge pmd materialized from under us just retry later */ 849 if (unlikely(pmd_trans_huge(*pmd))) 850 return 0; 851 /* 852 * A regular pmd is established and it can't morph into a huge pmd 853 * from under us anymore at this point because we hold the mmap_sem 854 * read mode and khugepaged takes it in write mode. So now it's 855 * safe to run pte_offset_map(). 
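	 *
	 * Rough sketch of the invariant relied upon here (a reading aid,
	 * not an extra locking rule): this fault path runs under
	 * down_read(&mm->mmap_sem), while khugepaged only collapses a
	 * range to a huge pmd under down_write(&mm->mmap_sem), so a
	 * regular pmd seen here cannot be promoted until we return.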
856 */ 857 pte = pte_offset_map(pmd, address); 858 return handle_pte_fault(mm, vma, address, pte, pmd, flags); 859 } 860 861 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 862 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 863 struct vm_area_struct *vma) 864 { 865 struct page *src_page; 866 pmd_t pmd; 867 pgtable_t pgtable; 868 int ret; 869 870 ret = -ENOMEM; 871 pgtable = pte_alloc_one(dst_mm, addr); 872 if (unlikely(!pgtable)) 873 goto out; 874 875 spin_lock(&dst_mm->page_table_lock); 876 spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING); 877 878 ret = -EAGAIN; 879 pmd = *src_pmd; 880 if (unlikely(!pmd_trans_huge(pmd))) { 881 pte_free(dst_mm, pgtable); 882 goto out_unlock; 883 } 884 /* 885 * mm->page_table_lock is enough to be sure that huge zero pmd is not 886 * under splitting since we don't split the page itself, only pmd to 887 * a page table. 888 */ 889 if (is_huge_zero_pmd(pmd)) { 890 unsigned long zero_pfn; 891 bool set; 892 /* 893 * get_huge_zero_page() will never allocate a new page here, 894 * since we already have a zero page to copy. It just takes a 895 * reference. 896 */ 897 zero_pfn = get_huge_zero_page(); 898 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 899 zero_pfn); 900 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */ 901 ret = 0; 902 goto out_unlock; 903 } 904 if (unlikely(pmd_trans_splitting(pmd))) { 905 /* split huge page running from under us */ 906 spin_unlock(&src_mm->page_table_lock); 907 spin_unlock(&dst_mm->page_table_lock); 908 pte_free(dst_mm, pgtable); 909 910 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ 911 goto out; 912 } 913 src_page = pmd_page(pmd); 914 VM_BUG_ON(!PageHead(src_page)); 915 get_page(src_page); 916 page_dup_rmap(src_page); 917 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 918 919 pmdp_set_wrprotect(src_mm, addr, src_pmd); 920 pmd = pmd_mkold(pmd_wrprotect(pmd)); 921 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 922 pgtable_trans_huge_deposit(dst_mm, pgtable); 923 dst_mm->nr_ptes++; 924 925 ret = 0; 926 out_unlock: 927 spin_unlock(&src_mm->page_table_lock); 928 spin_unlock(&dst_mm->page_table_lock); 929 out: 930 return ret; 931 } 932 933 void huge_pmd_set_accessed(struct mm_struct *mm, 934 struct vm_area_struct *vma, 935 unsigned long address, 936 pmd_t *pmd, pmd_t orig_pmd, 937 int dirty) 938 { 939 pmd_t entry; 940 unsigned long haddr; 941 942 spin_lock(&mm->page_table_lock); 943 if (unlikely(!pmd_same(*pmd, orig_pmd))) 944 goto unlock; 945 946 entry = pmd_mkyoung(orig_pmd); 947 haddr = address & HPAGE_PMD_MASK; 948 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) 949 update_mmu_cache_pmd(vma, address, pmd); 950 951 unlock: 952 spin_unlock(&mm->page_table_lock); 953 } 954 955 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm, 956 struct vm_area_struct *vma, unsigned long address, 957 pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr) 958 { 959 pgtable_t pgtable; 960 pmd_t _pmd; 961 struct page *page; 962 int i, ret = 0; 963 unsigned long mmun_start; /* For mmu_notifiers */ 964 unsigned long mmun_end; /* For mmu_notifiers */ 965 966 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 967 if (!page) { 968 ret |= VM_FAULT_OOM; 969 goto out; 970 } 971 972 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { 973 put_page(page); 974 ret |= VM_FAULT_OOM; 975 goto out; 976 } 977 978 clear_user_highpage(page, address); 979 __SetPageUptodate(page); 980 981 mmun_start = haddr; 982 mmun_end = haddr + HPAGE_PMD_SIZE; 983 
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 984 985 spin_lock(&mm->page_table_lock); 986 if (unlikely(!pmd_same(*pmd, orig_pmd))) 987 goto out_free_page; 988 989 pmdp_clear_flush(vma, haddr, pmd); 990 /* leave pmd empty until pte is filled */ 991 992 pgtable = pgtable_trans_huge_withdraw(mm); 993 pmd_populate(mm, &_pmd, pgtable); 994 995 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 996 pte_t *pte, entry; 997 if (haddr == (address & PAGE_MASK)) { 998 entry = mk_pte(page, vma->vm_page_prot); 999 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1000 page_add_new_anon_rmap(page, vma, haddr); 1001 } else { 1002 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 1003 entry = pte_mkspecial(entry); 1004 } 1005 pte = pte_offset_map(&_pmd, haddr); 1006 VM_BUG_ON(!pte_none(*pte)); 1007 set_pte_at(mm, haddr, pte, entry); 1008 pte_unmap(pte); 1009 } 1010 smp_wmb(); /* make pte visible before pmd */ 1011 pmd_populate(mm, pmd, pgtable); 1012 spin_unlock(&mm->page_table_lock); 1013 put_huge_zero_page(); 1014 inc_mm_counter(mm, MM_ANONPAGES); 1015 1016 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1017 1018 ret |= VM_FAULT_WRITE; 1019 out: 1020 return ret; 1021 out_free_page: 1022 spin_unlock(&mm->page_table_lock); 1023 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1024 mem_cgroup_uncharge_page(page); 1025 put_page(page); 1026 goto out; 1027 } 1028 1029 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, 1030 struct vm_area_struct *vma, 1031 unsigned long address, 1032 pmd_t *pmd, pmd_t orig_pmd, 1033 struct page *page, 1034 unsigned long haddr) 1035 { 1036 pgtable_t pgtable; 1037 pmd_t _pmd; 1038 int ret = 0, i; 1039 struct page **pages; 1040 unsigned long mmun_start; /* For mmu_notifiers */ 1041 unsigned long mmun_end; /* For mmu_notifiers */ 1042 1043 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 1044 GFP_KERNEL); 1045 if (unlikely(!pages)) { 1046 ret |= VM_FAULT_OOM; 1047 goto out; 1048 } 1049 1050 for (i = 0; i < HPAGE_PMD_NR; i++) { 1051 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | 1052 __GFP_OTHER_NODE, 1053 vma, address, page_to_nid(page)); 1054 if (unlikely(!pages[i] || 1055 mem_cgroup_newpage_charge(pages[i], mm, 1056 GFP_KERNEL))) { 1057 if (pages[i]) 1058 put_page(pages[i]); 1059 mem_cgroup_uncharge_start(); 1060 while (--i >= 0) { 1061 mem_cgroup_uncharge_page(pages[i]); 1062 put_page(pages[i]); 1063 } 1064 mem_cgroup_uncharge_end(); 1065 kfree(pages); 1066 ret |= VM_FAULT_OOM; 1067 goto out; 1068 } 1069 } 1070 1071 for (i = 0; i < HPAGE_PMD_NR; i++) { 1072 copy_user_highpage(pages[i], page + i, 1073 haddr + PAGE_SIZE * i, vma); 1074 __SetPageUptodate(pages[i]); 1075 cond_resched(); 1076 } 1077 1078 mmun_start = haddr; 1079 mmun_end = haddr + HPAGE_PMD_SIZE; 1080 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1081 1082 spin_lock(&mm->page_table_lock); 1083 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1084 goto out_free_pages; 1085 VM_BUG_ON(!PageHead(page)); 1086 1087 pmdp_clear_flush(vma, haddr, pmd); 1088 /* leave pmd empty until pte is filled */ 1089 1090 pgtable = pgtable_trans_huge_withdraw(mm); 1091 pmd_populate(mm, &_pmd, pgtable); 1092 1093 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1094 pte_t *pte, entry; 1095 entry = mk_pte(pages[i], vma->vm_page_prot); 1096 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1097 page_add_new_anon_rmap(pages[i], vma, haddr); 1098 pte = pte_offset_map(&_pmd, haddr); 1099 VM_BUG_ON(!pte_none(*pte)); 1100 set_pte_at(mm, haddr, pte, entry); 
1101 pte_unmap(pte); 1102 } 1103 kfree(pages); 1104 1105 smp_wmb(); /* make pte visible before pmd */ 1106 pmd_populate(mm, pmd, pgtable); 1107 page_remove_rmap(page); 1108 spin_unlock(&mm->page_table_lock); 1109 1110 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1111 1112 ret |= VM_FAULT_WRITE; 1113 put_page(page); 1114 1115 out: 1116 return ret; 1117 1118 out_free_pages: 1119 spin_unlock(&mm->page_table_lock); 1120 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1121 mem_cgroup_uncharge_start(); 1122 for (i = 0; i < HPAGE_PMD_NR; i++) { 1123 mem_cgroup_uncharge_page(pages[i]); 1124 put_page(pages[i]); 1125 } 1126 mem_cgroup_uncharge_end(); 1127 kfree(pages); 1128 goto out; 1129 } 1130 1131 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 1132 unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 1133 { 1134 int ret = 0; 1135 struct page *page = NULL, *new_page; 1136 unsigned long haddr; 1137 unsigned long mmun_start; /* For mmu_notifiers */ 1138 unsigned long mmun_end; /* For mmu_notifiers */ 1139 1140 VM_BUG_ON(!vma->anon_vma); 1141 haddr = address & HPAGE_PMD_MASK; 1142 if (is_huge_zero_pmd(orig_pmd)) 1143 goto alloc; 1144 spin_lock(&mm->page_table_lock); 1145 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1146 goto out_unlock; 1147 1148 page = pmd_page(orig_pmd); 1149 VM_BUG_ON(!PageCompound(page) || !PageHead(page)); 1150 if (page_mapcount(page) == 1) { 1151 pmd_t entry; 1152 entry = pmd_mkyoung(orig_pmd); 1153 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1154 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1155 update_mmu_cache_pmd(vma, address, pmd); 1156 ret |= VM_FAULT_WRITE; 1157 goto out_unlock; 1158 } 1159 get_page(page); 1160 spin_unlock(&mm->page_table_lock); 1161 alloc: 1162 if (transparent_hugepage_enabled(vma) && 1163 !transparent_hugepage_debug_cow()) 1164 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 1165 vma, haddr, numa_node_id(), 0); 1166 else 1167 new_page = NULL; 1168 1169 if (unlikely(!new_page)) { 1170 count_vm_event(THP_FAULT_FALLBACK); 1171 if (is_huge_zero_pmd(orig_pmd)) { 1172 ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, 1173 address, pmd, orig_pmd, haddr); 1174 } else { 1175 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 1176 pmd, orig_pmd, page, haddr); 1177 if (ret & VM_FAULT_OOM) 1178 split_huge_page(page); 1179 put_page(page); 1180 } 1181 goto out; 1182 } 1183 count_vm_event(THP_FAULT_ALLOC); 1184 1185 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1186 put_page(new_page); 1187 if (page) { 1188 split_huge_page(page); 1189 put_page(page); 1190 } 1191 ret |= VM_FAULT_OOM; 1192 goto out; 1193 } 1194 1195 if (is_huge_zero_pmd(orig_pmd)) 1196 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 1197 else 1198 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1199 __SetPageUptodate(new_page); 1200 1201 mmun_start = haddr; 1202 mmun_end = haddr + HPAGE_PMD_SIZE; 1203 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1204 1205 spin_lock(&mm->page_table_lock); 1206 if (page) 1207 put_page(page); 1208 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1209 spin_unlock(&mm->page_table_lock); 1210 mem_cgroup_uncharge_page(new_page); 1211 put_page(new_page); 1212 goto out_mn; 1213 } else { 1214 pmd_t entry; 1215 entry = mk_huge_pmd(new_page, vma); 1216 pmdp_clear_flush(vma, haddr, pmd); 1217 page_add_new_anon_rmap(new_page, vma, haddr); 1218 set_pmd_at(mm, haddr, pmd, entry); 1219 update_mmu_cache_pmd(vma, address, pmd); 1220 if 
(is_huge_zero_pmd(orig_pmd)) {
			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
			put_huge_zero_page();
		} else {
			VM_BUG_ON(!PageHead(page));
			page_remove_rmap(page);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(&mm->page_table_lock);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	struct page *page;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	int target_nid;
	int current_nid = -1;
	bool migrated;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;

	page = pmd_page(pmd);
	get_page(page);
	current_nid = page_to_nid(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (current_nid == numa_node_id())
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);

	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == -1) {
		put_page(page);
		goto clear_pmdnuma;
	}

	/* Acquire the page lock to serialise THP migrations */
	spin_unlock(&mm->page_table_lock);
	lock_page(page);

	/* Confirm the pmd did not change while the page_table_lock was dropped */
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(pmd, *pmdp))) {
		unlock_page(page);
		put_page(page);
		goto out_unlock;
	}
	spin_unlock(&mm->page_table_lock);

	/* Migrate the THP to the requested node */
	migrated = migrate_misplaced_transhuge_page(mm, vma,
				pmdp, pmd, addr, page, target_nid);
	if (!migrated)
		goto check_same;

	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
	return 0;

check_same:
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;
clear_pmdnuma:
	pmd =
pmd_mknonnuma(pmd); 1344 set_pmd_at(mm, haddr, pmdp, pmd); 1345 VM_BUG_ON(pmd_numa(*pmdp)); 1346 update_mmu_cache_pmd(vma, addr, pmdp); 1347 out_unlock: 1348 spin_unlock(&mm->page_table_lock); 1349 if (current_nid != -1) 1350 task_numa_fault(current_nid, HPAGE_PMD_NR, false); 1351 return 0; 1352 } 1353 1354 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1355 pmd_t *pmd, unsigned long addr) 1356 { 1357 int ret = 0; 1358 1359 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1360 struct page *page; 1361 pgtable_t pgtable; 1362 pmd_t orig_pmd; 1363 pgtable = pgtable_trans_huge_withdraw(tlb->mm); 1364 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1365 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1366 if (is_huge_zero_pmd(orig_pmd)) { 1367 tlb->mm->nr_ptes--; 1368 spin_unlock(&tlb->mm->page_table_lock); 1369 put_huge_zero_page(); 1370 } else { 1371 page = pmd_page(orig_pmd); 1372 page_remove_rmap(page); 1373 VM_BUG_ON(page_mapcount(page) < 0); 1374 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1375 VM_BUG_ON(!PageHead(page)); 1376 tlb->mm->nr_ptes--; 1377 spin_unlock(&tlb->mm->page_table_lock); 1378 tlb_remove_page(tlb, page); 1379 } 1380 pte_free(tlb->mm, pgtable); 1381 ret = 1; 1382 } 1383 return ret; 1384 } 1385 1386 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1387 unsigned long addr, unsigned long end, 1388 unsigned char *vec) 1389 { 1390 int ret = 0; 1391 1392 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1393 /* 1394 * All logical pages in the range are present 1395 * if backed by a huge page. 1396 */ 1397 spin_unlock(&vma->vm_mm->page_table_lock); 1398 memset(vec, 1, (end - addr) >> PAGE_SHIFT); 1399 ret = 1; 1400 } 1401 1402 return ret; 1403 } 1404 1405 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, 1406 unsigned long old_addr, 1407 unsigned long new_addr, unsigned long old_end, 1408 pmd_t *old_pmd, pmd_t *new_pmd) 1409 { 1410 int ret = 0; 1411 pmd_t pmd; 1412 1413 struct mm_struct *mm = vma->vm_mm; 1414 1415 if ((old_addr & ~HPAGE_PMD_MASK) || 1416 (new_addr & ~HPAGE_PMD_MASK) || 1417 old_end - old_addr < HPAGE_PMD_SIZE || 1418 (new_vma->vm_flags & VM_NOHUGEPAGE)) 1419 goto out; 1420 1421 /* 1422 * The destination pmd shouldn't be established, free_pgtables() 1423 * should have release it. 1424 */ 1425 if (WARN_ON(!pmd_none(*new_pmd))) { 1426 VM_BUG_ON(pmd_trans_huge(*new_pmd)); 1427 goto out; 1428 } 1429 1430 ret = __pmd_trans_huge_lock(old_pmd, vma); 1431 if (ret == 1) { 1432 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); 1433 VM_BUG_ON(!pmd_none(*new_pmd)); 1434 set_pmd_at(mm, new_addr, new_pmd, pmd); 1435 spin_unlock(&mm->page_table_lock); 1436 } 1437 out: 1438 return ret; 1439 } 1440 1441 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1442 unsigned long addr, pgprot_t newprot, int prot_numa) 1443 { 1444 struct mm_struct *mm = vma->vm_mm; 1445 int ret = 0; 1446 1447 if (__pmd_trans_huge_lock(pmd, vma) == 1) { 1448 pmd_t entry; 1449 entry = pmdp_get_and_clear(mm, addr, pmd); 1450 if (!prot_numa) { 1451 entry = pmd_modify(entry, newprot); 1452 BUG_ON(pmd_write(entry)); 1453 } else { 1454 struct page *page = pmd_page(*pmd); 1455 1456 /* only check non-shared pages */ 1457 if (page_mapcount(page) == 1 && 1458 !pmd_numa(*pmd)) { 1459 entry = pmd_mknuma(entry); 1460 } 1461 } 1462 set_pmd_at(mm, addr, pmd, entry); 1463 spin_unlock(&vma->vm_mm->page_table_lock); 1464 ret = 1; 1465 } 1466 1467 return ret; 1468 } 1469 1470 /* 1471 * Returns 1 if a given pmd maps a stable (not under splitting) thp. 
1472 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise. 1473 * 1474 * Note that if it returns 1, this routine returns without unlocking page 1475 * table locks. So callers must unlock them. 1476 */ 1477 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) 1478 { 1479 spin_lock(&vma->vm_mm->page_table_lock); 1480 if (likely(pmd_trans_huge(*pmd))) { 1481 if (unlikely(pmd_trans_splitting(*pmd))) { 1482 spin_unlock(&vma->vm_mm->page_table_lock); 1483 wait_split_huge_page(vma->anon_vma, pmd); 1484 return -1; 1485 } else { 1486 /* Thp mapped by 'pmd' is stable, so we can 1487 * handle it as it is. */ 1488 return 1; 1489 } 1490 } 1491 spin_unlock(&vma->vm_mm->page_table_lock); 1492 return 0; 1493 } 1494 1495 pmd_t *page_check_address_pmd(struct page *page, 1496 struct mm_struct *mm, 1497 unsigned long address, 1498 enum page_check_address_pmd_flag flag) 1499 { 1500 pmd_t *pmd, *ret = NULL; 1501 1502 if (address & ~HPAGE_PMD_MASK) 1503 goto out; 1504 1505 pmd = mm_find_pmd(mm, address); 1506 if (!pmd) 1507 goto out; 1508 if (pmd_none(*pmd)) 1509 goto out; 1510 if (pmd_page(*pmd) != page) 1511 goto out; 1512 /* 1513 * split_vma() may create temporary aliased mappings. There is 1514 * no risk as long as all huge pmd are found and have their 1515 * splitting bit set before __split_huge_page_refcount 1516 * runs. Finding the same huge pmd more than once during the 1517 * same rmap walk is not a problem. 1518 */ 1519 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 1520 pmd_trans_splitting(*pmd)) 1521 goto out; 1522 if (pmd_trans_huge(*pmd)) { 1523 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 1524 !pmd_trans_splitting(*pmd)); 1525 ret = pmd; 1526 } 1527 out: 1528 return ret; 1529 } 1530 1531 static int __split_huge_page_splitting(struct page *page, 1532 struct vm_area_struct *vma, 1533 unsigned long address) 1534 { 1535 struct mm_struct *mm = vma->vm_mm; 1536 pmd_t *pmd; 1537 int ret = 0; 1538 /* For mmu_notifiers */ 1539 const unsigned long mmun_start = address; 1540 const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 1541 1542 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1543 spin_lock(&mm->page_table_lock); 1544 pmd = page_check_address_pmd(page, mm, address, 1545 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG); 1546 if (pmd) { 1547 /* 1548 * We can't temporarily set the pmd to null in order 1549 * to split it, the pmd must remain marked huge at all 1550 * times or the VM won't take the pmd_trans_huge paths 1551 * and it won't wait on the anon_vma->root->rwsem to 1552 * serialize against split_huge_page*. 
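		 *
		 * As a reading aid only, the split then proceeds in the
		 * three phases driven by __split_huge_page():
		 *
		 *	pmdp_splitting_flush()         mark the pmd splitting
		 *	__split_huge_page_refcount()   transfer counts to tails
		 *	__split_huge_page_map()        rewrite the pmd as ptes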
		 */
		pmdp_splitting_flush(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	return ret;
}

static void __split_huge_page_refcount(struct page *page)
{
	int i;
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);

	compound_lock(page);
	/* complete memcg work before adding pages to the LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain hwpoison flag of the poisoned tail page:
		 *   fix for the unsuitable process killed on Guest Machine(KVM)
		 *   by the memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * during the split. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved by setting a reserved bit in the
		 * pmd, not by clearing the present bit.
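		 *
		 * Illustrative accounting only, a sketch of the two
		 * statements below rather than an additional rule: each
		 * tail page ends up with
		 *
		 *	_count = page_mapcount(head) + page_mapcount(tail) + 1
		 *
		 * and the head later gives up the accumulated tail_count
		 * via atomic_sub(tail_count, &page->_count).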
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;
		page_nid_xchg_last(page_tail, page_nid_last(page));

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(page, page_tail, lruvec);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping,
		 * e.g. if add_to_swap() is running on an lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = pgtable_trans_huge_withdraw(mm);
		pmd_populate(mm, &_pmd, pgtable);

		haddr = address;
		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			if (pmd_numa(*pmd))
				entry = pte_mknuma(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has full access to the hugepage during
		 * the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously.
		 * So instead of doing "pmd_populate(); flush_tlb_range();"
		 * we first mark the current pmd notpresent (atomically
		 * because here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
	BUG_ON(!PageAnon(page));

	/*
	 * The caller does not necessarily hold an mmap_sem that would
	 * prevent the anon_vma disappearing so we first take a reference
	 * to it and then lock the anon_vma for write. This is similar to
	 * page_lock_anon_vma_read except the write lock is taken to
	 * serialise against parallel split or collapse operations.
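	 *
	 * Shown here only as a reading aid for the code below, the
	 * resulting order is:
	 *
	 *	anon_vma = page_get_anon_vma(page);    pin the anon_vma
	 *	anon_vma_lock_write(anon_vma);         exclude split/collapse
	 *	__split_huge_page(page, anon_vma);
	 *	anon_vma_unlock_write(anon_vma);
	 *	put_anon_vma(anon_vma);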
1816 */ 1817 anon_vma = page_get_anon_vma(page); 1818 if (!anon_vma) 1819 goto out; 1820 anon_vma_lock_write(anon_vma); 1821 1822 ret = 0; 1823 if (!PageCompound(page)) 1824 goto out_unlock; 1825 1826 BUG_ON(!PageSwapBacked(page)); 1827 __split_huge_page(page, anon_vma); 1828 count_vm_event(THP_SPLIT); 1829 1830 BUG_ON(PageCompound(page)); 1831 out_unlock: 1832 anon_vma_unlock_write(anon_vma); 1833 put_anon_vma(anon_vma); 1834 out: 1835 return ret; 1836 } 1837 1838 #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) 1839 1840 int hugepage_madvise(struct vm_area_struct *vma, 1841 unsigned long *vm_flags, int advice) 1842 { 1843 struct mm_struct *mm = vma->vm_mm; 1844 1845 switch (advice) { 1846 case MADV_HUGEPAGE: 1847 /* 1848 * Be somewhat over-protective like KSM for now! 1849 */ 1850 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 1851 return -EINVAL; 1852 if (mm->def_flags & VM_NOHUGEPAGE) 1853 return -EINVAL; 1854 *vm_flags &= ~VM_NOHUGEPAGE; 1855 *vm_flags |= VM_HUGEPAGE; 1856 /* 1857 * If the vma become good for khugepaged to scan, 1858 * register it here without waiting a page fault that 1859 * may not happen any time soon. 1860 */ 1861 if (unlikely(khugepaged_enter_vma_merge(vma))) 1862 return -ENOMEM; 1863 break; 1864 case MADV_NOHUGEPAGE: 1865 /* 1866 * Be somewhat over-protective like KSM for now! 1867 */ 1868 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1869 return -EINVAL; 1870 *vm_flags &= ~VM_HUGEPAGE; 1871 *vm_flags |= VM_NOHUGEPAGE; 1872 /* 1873 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 1874 * this vma even if we leave the mm registered in khugepaged if 1875 * it got registered before VM_NOHUGEPAGE was set. 1876 */ 1877 break; 1878 } 1879 1880 return 0; 1881 } 1882 1883 static int __init khugepaged_slab_init(void) 1884 { 1885 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 1886 sizeof(struct mm_slot), 1887 __alignof__(struct mm_slot), 0, NULL); 1888 if (!mm_slot_cache) 1889 return -ENOMEM; 1890 1891 return 0; 1892 } 1893 1894 static inline struct mm_slot *alloc_mm_slot(void) 1895 { 1896 if (!mm_slot_cache) /* initialization failed */ 1897 return NULL; 1898 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 1899 } 1900 1901 static inline void free_mm_slot(struct mm_slot *mm_slot) 1902 { 1903 kmem_cache_free(mm_slot_cache, mm_slot); 1904 } 1905 1906 static struct mm_slot *get_mm_slot(struct mm_struct *mm) 1907 { 1908 struct mm_slot *mm_slot; 1909 1910 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 1911 if (mm == mm_slot->mm) 1912 return mm_slot; 1913 1914 return NULL; 1915 } 1916 1917 static void insert_to_mm_slots_hash(struct mm_struct *mm, 1918 struct mm_slot *mm_slot) 1919 { 1920 mm_slot->mm = mm; 1921 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 1922 } 1923 1924 static inline int khugepaged_test_exit(struct mm_struct *mm) 1925 { 1926 return atomic_read(&mm->mm_users) == 0; 1927 } 1928 1929 int __khugepaged_enter(struct mm_struct *mm) 1930 { 1931 struct mm_slot *mm_slot; 1932 int wakeup; 1933 1934 mm_slot = alloc_mm_slot(); 1935 if (!mm_slot) 1936 return -ENOMEM; 1937 1938 /* __khugepaged_exit() must not run from under us */ 1939 VM_BUG_ON(khugepaged_test_exit(mm)); 1940 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 1941 free_mm_slot(mm_slot); 1942 return 0; 1943 } 1944 1945 spin_lock(&khugepaged_mm_lock); 1946 insert_to_mm_slots_hash(mm, mm_slot); 1947 /* 1948 * Insert just behind the scanning cursor, to let the area settle 1949 * down a little. 
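	 *
	 * (Sketch of the wakeup rule implemented below: khugepaged is only
	 * woken when the scan list was empty, i.e. when it might be asleep
	 * with nothing to do; otherwise the cursor will reach the new slot
	 * on its own as the scan wraps around.)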
1950 */ 1951 wakeup = list_empty(&khugepaged_scan.mm_head); 1952 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 1953 spin_unlock(&khugepaged_mm_lock); 1954 1955 atomic_inc(&mm->mm_count); 1956 if (wakeup) 1957 wake_up_interruptible(&khugepaged_wait); 1958 1959 return 0; 1960 } 1961 1962 int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 1963 { 1964 unsigned long hstart, hend; 1965 if (!vma->anon_vma) 1966 /* 1967 * Not yet faulted in so we will register later in the 1968 * page fault if needed. 1969 */ 1970 return 0; 1971 if (vma->vm_ops) 1972 /* khugepaged not yet working on file or special mappings */ 1973 return 0; 1974 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 1975 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1976 hend = vma->vm_end & HPAGE_PMD_MASK; 1977 if (hstart < hend) 1978 return khugepaged_enter(vma); 1979 return 0; 1980 } 1981 1982 void __khugepaged_exit(struct mm_struct *mm) 1983 { 1984 struct mm_slot *mm_slot; 1985 int free = 0; 1986 1987 spin_lock(&khugepaged_mm_lock); 1988 mm_slot = get_mm_slot(mm); 1989 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 1990 hash_del(&mm_slot->hash); 1991 list_del(&mm_slot->mm_node); 1992 free = 1; 1993 } 1994 spin_unlock(&khugepaged_mm_lock); 1995 1996 if (free) { 1997 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1998 free_mm_slot(mm_slot); 1999 mmdrop(mm); 2000 } else if (mm_slot) { 2001 /* 2002 * This is required to serialize against 2003 * khugepaged_test_exit() (which is guaranteed to run 2004 * under mmap sem read mode). Stop here (after we 2005 * return all pagetables will be destroyed) until 2006 * khugepaged has finished working on the pagetables 2007 * under the mmap_sem. 2008 */ 2009 down_write(&mm->mmap_sem); 2010 up_write(&mm->mmap_sem); 2011 } 2012 } 2013 2014 static void release_pte_page(struct page *page) 2015 { 2016 /* 0 stands for page_is_file_cache(page) == false */ 2017 dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2018 unlock_page(page); 2019 putback_lru_page(page); 2020 } 2021 2022 static void release_pte_pages(pte_t *pte, pte_t *_pte) 2023 { 2024 while (--_pte >= pte) { 2025 pte_t pteval = *_pte; 2026 if (!pte_none(pteval)) 2027 release_pte_page(pte_page(pteval)); 2028 } 2029 } 2030 2031 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2032 unsigned long address, 2033 pte_t *pte) 2034 { 2035 struct page *page; 2036 pte_t *_pte; 2037 int referenced = 0, none = 0; 2038 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2039 _pte++, address += PAGE_SIZE) { 2040 pte_t pteval = *_pte; 2041 if (pte_none(pteval)) { 2042 if (++none <= khugepaged_max_ptes_none) 2043 continue; 2044 else 2045 goto out; 2046 } 2047 if (!pte_present(pteval) || !pte_write(pteval)) 2048 goto out; 2049 page = vm_normal_page(vma, address, pteval); 2050 if (unlikely(!page)) 2051 goto out; 2052 2053 VM_BUG_ON(PageCompound(page)); 2054 BUG_ON(!PageAnon(page)); 2055 VM_BUG_ON(!PageSwapBacked(page)); 2056 2057 /* cannot use mapcount: can't collapse if there's a gup pin */ 2058 if (page_count(page) != 1) 2059 goto out; 2060 /* 2061 * We can do it before isolate_lru_page because the 2062 * page can't be freed from under us. NOTE: PG_lock 2063 * is needed to serialize against split_huge_page 2064 * when invoked from the VM. 2065 */ 2066 if (!trylock_page(page)) 2067 goto out; 2068 /* 2069 * Isolate the page to avoid collapsing an hugepage 2070 * currently in use by the VM. 
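 * isolate_lru_page() also takes the page off the LRU, so vmscan
 * and page migration won't touch it while its contents are being
 * copied; release_pte_page() puts it back if the collapse aborts.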
2071 */ 2072 if (isolate_lru_page(page)) { 2073 unlock_page(page); 2074 goto out; 2075 } 2076 /* 0 stands for page_is_file_cache(page) == false */ 2077 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2078 VM_BUG_ON(!PageLocked(page)); 2079 VM_BUG_ON(PageLRU(page)); 2080 2081 /* If there is no mapped pte young don't collapse the page */ 2082 if (pte_young(pteval) || PageReferenced(page) || 2083 mmu_notifier_test_young(vma->vm_mm, address)) 2084 referenced = 1; 2085 } 2086 if (likely(referenced)) 2087 return 1; 2088 out: 2089 release_pte_pages(pte, _pte); 2090 return 0; 2091 } 2092 2093 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2094 struct vm_area_struct *vma, 2095 unsigned long address, 2096 spinlock_t *ptl) 2097 { 2098 pte_t *_pte; 2099 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2100 pte_t pteval = *_pte; 2101 struct page *src_page; 2102 2103 if (pte_none(pteval)) { 2104 clear_user_highpage(page, address); 2105 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2106 } else { 2107 src_page = pte_page(pteval); 2108 copy_user_highpage(page, src_page, address, vma); 2109 VM_BUG_ON(page_mapcount(src_page) != 1); 2110 release_pte_page(src_page); 2111 /* 2112 * ptl mostly unnecessary, but preempt has to 2113 * be disabled to update the per-cpu stats 2114 * inside page_remove_rmap(). 2115 */ 2116 spin_lock(ptl); 2117 /* 2118 * paravirt calls inside pte_clear here are 2119 * superfluous. 2120 */ 2121 pte_clear(vma->vm_mm, address, _pte); 2122 page_remove_rmap(src_page); 2123 spin_unlock(ptl); 2124 free_page_and_swap_cache(src_page); 2125 } 2126 2127 address += PAGE_SIZE; 2128 page++; 2129 } 2130 } 2131 2132 static void khugepaged_alloc_sleep(void) 2133 { 2134 wait_event_freezable_timeout(khugepaged_wait, false, 2135 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 2136 } 2137 2138 #ifdef CONFIG_NUMA 2139 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2140 { 2141 if (IS_ERR(*hpage)) { 2142 if (!*wait) 2143 return false; 2144 2145 *wait = false; 2146 *hpage = NULL; 2147 khugepaged_alloc_sleep(); 2148 } else if (*hpage) { 2149 put_page(*hpage); 2150 *hpage = NULL; 2151 } 2152 2153 return true; 2154 } 2155 2156 static struct page 2157 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2158 struct vm_area_struct *vma, unsigned long address, 2159 int node) 2160 { 2161 VM_BUG_ON(*hpage); 2162 /* 2163 * Allocate the page while the vma is still valid and under 2164 * the mmap_sem read mode so there is no memory allocation 2165 * later when we take the mmap_sem in write mode. This is more 2166 * friendly behavior (OTOH it may actually hide bugs) to 2167 * filesystems in userland with daemons allocating memory in 2168 * the userland I/O paths. Allocating memory with the 2169 * mmap_sem in read mode is a good idea also to allow greater 2170 * scalability. 2171 */ 2172 *hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 2173 node, __GFP_OTHER_NODE); 2174 2175 /* 2176 * After allocating the hugepage, release the mmap_sem read lock in 2177 * preparation for taking it in write mode.
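 * Once the lock is dropped the vma (or the whole mm) may go away,
 * which is why collapse_huge_page() revalidates everything with
 * khugepaged_test_exit(), find_vma() and hugepage_vma_check()
 * after re-taking mmap_sem in write mode.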
2178 */ 2179 up_read(&mm->mmap_sem); 2180 if (unlikely(!*hpage)) { 2181 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2182 *hpage = ERR_PTR(-ENOMEM); 2183 return NULL; 2184 } 2185 2186 count_vm_event(THP_COLLAPSE_ALLOC); 2187 return *hpage; 2188 } 2189 #else 2190 static struct page *khugepaged_alloc_hugepage(bool *wait) 2191 { 2192 struct page *hpage; 2193 2194 do { 2195 hpage = alloc_hugepage(khugepaged_defrag()); 2196 if (!hpage) { 2197 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2198 if (!*wait) 2199 return NULL; 2200 2201 *wait = false; 2202 khugepaged_alloc_sleep(); 2203 } else 2204 count_vm_event(THP_COLLAPSE_ALLOC); 2205 } while (unlikely(!hpage) && likely(khugepaged_enabled())); 2206 2207 return hpage; 2208 } 2209 2210 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2211 { 2212 if (!*hpage) 2213 *hpage = khugepaged_alloc_hugepage(wait); 2214 2215 if (unlikely(!*hpage)) 2216 return false; 2217 2218 return true; 2219 } 2220 2221 static struct page 2222 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2223 struct vm_area_struct *vma, unsigned long address, 2224 int node) 2225 { 2226 up_read(&mm->mmap_sem); 2227 VM_BUG_ON(!*hpage); 2228 return *hpage; 2229 } 2230 #endif 2231 2232 static bool hugepage_vma_check(struct vm_area_struct *vma) 2233 { 2234 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 2235 (vma->vm_flags & VM_NOHUGEPAGE)) 2236 return false; 2237 2238 if (!vma->anon_vma || vma->vm_ops) 2239 return false; 2240 if (is_vma_temporary_stack(vma)) 2241 return false; 2242 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2243 return true; 2244 } 2245 2246 static void collapse_huge_page(struct mm_struct *mm, 2247 unsigned long address, 2248 struct page **hpage, 2249 struct vm_area_struct *vma, 2250 int node) 2251 { 2252 pmd_t *pmd, _pmd; 2253 pte_t *pte; 2254 pgtable_t pgtable; 2255 struct page *new_page; 2256 spinlock_t *ptl; 2257 int isolated; 2258 unsigned long hstart, hend; 2259 unsigned long mmun_start; /* For mmu_notifiers */ 2260 unsigned long mmun_end; /* For mmu_notifiers */ 2261 2262 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2263 2264 /* release the mmap_sem read lock. */ 2265 new_page = khugepaged_alloc_page(hpage, mm, vma, address, node); 2266 if (!new_page) 2267 return; 2268 2269 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) 2270 return; 2271 2272 /* 2273 * Prevent all access to pagetables with the exception of 2274 * gup_fast later handled by the ptep_clear_flush and the VM 2275 * handled by the anon_vma lock + PG_lock. 2276 */ 2277 down_write(&mm->mmap_sem); 2278 if (unlikely(khugepaged_test_exit(mm))) 2279 goto out; 2280 2281 vma = find_vma(mm, address); 2282 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2283 hend = vma->vm_end & HPAGE_PMD_MASK; 2284 if (address < hstart || address + HPAGE_PMD_SIZE > hend) 2285 goto out; 2286 if (!hugepage_vma_check(vma)) 2287 goto out; 2288 pmd = mm_find_pmd(mm, address); 2289 if (!pmd) 2290 goto out; 2291 if (pmd_trans_huge(*pmd)) 2292 goto out; 2293 2294 anon_vma_lock_write(vma->anon_vma); 2295 2296 pte = pte_offset_map(pmd, address); 2297 ptl = pte_lockptr(mm, pmd); 2298 2299 mmun_start = address; 2300 mmun_end = address + HPAGE_PMD_SIZE; 2301 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2302 spin_lock(&mm->page_table_lock); /* probably unnecessary */ 2303 /* 2304 * After this gup_fast can't run anymore.
This also removes 2305 * any huge TLB entry from the CPU so we won't allow 2306 * huge and small TLB entries for the same virtual address 2307 * to avoid the risk of CPU bugs in that area. 2308 */ 2309 _pmd = pmdp_clear_flush(vma, address, pmd); 2310 spin_unlock(&mm->page_table_lock); 2311 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2312 2313 spin_lock(ptl); 2314 isolated = __collapse_huge_page_isolate(vma, address, pte); 2315 spin_unlock(ptl); 2316 2317 if (unlikely(!isolated)) { 2318 pte_unmap(pte); 2319 spin_lock(&mm->page_table_lock); 2320 BUG_ON(!pmd_none(*pmd)); 2321 set_pmd_at(mm, address, pmd, _pmd); 2322 spin_unlock(&mm->page_table_lock); 2323 anon_vma_unlock_write(vma->anon_vma); 2324 goto out; 2325 } 2326 2327 /* 2328 * All pages are isolated and locked so anon_vma rmap 2329 * can't run anymore. 2330 */ 2331 anon_vma_unlock_write(vma->anon_vma); 2332 2333 __collapse_huge_page_copy(pte, new_page, vma, address, ptl); 2334 pte_unmap(pte); 2335 __SetPageUptodate(new_page); 2336 pgtable = pmd_pgtable(_pmd); 2337 2338 _pmd = mk_huge_pmd(new_page, vma); 2339 2340 /* 2341 * spin_lock() below is not the equivalent of smp_wmb(), so 2342 * this is needed to prevent the copy_huge_page writes from becoming 2343 * visible after the set_pmd_at() write. 2344 */ 2345 smp_wmb(); 2346 2347 spin_lock(&mm->page_table_lock); 2348 BUG_ON(!pmd_none(*pmd)); 2349 page_add_new_anon_rmap(new_page, vma, address); 2350 set_pmd_at(mm, address, pmd, _pmd); 2351 update_mmu_cache_pmd(vma, address, pmd); 2352 pgtable_trans_huge_deposit(mm, pgtable); 2353 spin_unlock(&mm->page_table_lock); 2354 2355 *hpage = NULL; 2356 2357 khugepaged_pages_collapsed++; 2358 out_up_write: 2359 up_write(&mm->mmap_sem); 2360 return; 2361 2362 out: 2363 mem_cgroup_uncharge_page(new_page); 2364 goto out_up_write; 2365 } 2366 2367 static int khugepaged_scan_pmd(struct mm_struct *mm, 2368 struct vm_area_struct *vma, 2369 unsigned long address, 2370 struct page **hpage) 2371 { 2372 pmd_t *pmd; 2373 pte_t *pte, *_pte; 2374 int ret = 0, referenced = 0, none = 0; 2375 struct page *page; 2376 unsigned long _address; 2377 spinlock_t *ptl; 2378 int node = NUMA_NO_NODE; 2379 2380 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2381 2382 pmd = mm_find_pmd(mm, address); 2383 if (!pmd) 2384 goto out; 2385 if (pmd_trans_huge(*pmd)) 2386 goto out; 2387 2388 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2389 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2390 _pte++, _address += PAGE_SIZE) { 2391 pte_t pteval = *_pte; 2392 if (pte_none(pteval)) { 2393 if (++none <= khugepaged_max_ptes_none) 2394 continue; 2395 else 2396 goto out_unmap; 2397 } 2398 if (!pte_present(pteval) || !pte_write(pteval)) 2399 goto out_unmap; 2400 page = vm_normal_page(vma, _address, pteval); 2401 if (unlikely(!page)) 2402 goto out_unmap; 2403 /* 2404 * Choose the node of the first page. This could 2405 * be more sophisticated and look at more pages, 2406 * but isn't for now.
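 * The node picked here is passed down to khugepaged_alloc_page()
 * so the replacement hugepage is preferentially allocated on the
 * node that already backs the first mapped page.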
2407 */ 2408 if (node == NUMA_NO_NODE) 2409 node = page_to_nid(page); 2410 VM_BUG_ON(PageCompound(page)); 2411 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2412 goto out_unmap; 2413 /* cannot use mapcount: can't collapse if there's a gup pin */ 2414 if (page_count(page) != 1) 2415 goto out_unmap; 2416 if (pte_young(pteval) || PageReferenced(page) || 2417 mmu_notifier_test_young(vma->vm_mm, address)) 2418 referenced = 1; 2419 } 2420 if (referenced) 2421 ret = 1; 2422 out_unmap: 2423 pte_unmap_unlock(pte, ptl); 2424 if (ret) 2425 /* collapse_huge_page will return with the mmap_sem released */ 2426 collapse_huge_page(mm, address, hpage, vma, node); 2427 out: 2428 return ret; 2429 } 2430 2431 static void collect_mm_slot(struct mm_slot *mm_slot) 2432 { 2433 struct mm_struct *mm = mm_slot->mm; 2434 2435 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2436 2437 if (khugepaged_test_exit(mm)) { 2438 /* free mm_slot */ 2439 hash_del(&mm_slot->hash); 2440 list_del(&mm_slot->mm_node); 2441 2442 /* 2443 * Not strictly needed because the mm exited already. 2444 * 2445 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2446 */ 2447 2448 /* khugepaged_mm_lock actually not necessary for the below */ 2449 free_mm_slot(mm_slot); 2450 mmdrop(mm); 2451 } 2452 } 2453 2454 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2455 struct page **hpage) 2456 __releases(&khugepaged_mm_lock) 2457 __acquires(&khugepaged_mm_lock) 2458 { 2459 struct mm_slot *mm_slot; 2460 struct mm_struct *mm; 2461 struct vm_area_struct *vma; 2462 int progress = 0; 2463 2464 VM_BUG_ON(!pages); 2465 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2466 2467 if (khugepaged_scan.mm_slot) 2468 mm_slot = khugepaged_scan.mm_slot; 2469 else { 2470 mm_slot = list_entry(khugepaged_scan.mm_head.next, 2471 struct mm_slot, mm_node); 2472 khugepaged_scan.address = 0; 2473 khugepaged_scan.mm_slot = mm_slot; 2474 } 2475 spin_unlock(&khugepaged_mm_lock); 2476 2477 mm = mm_slot->mm; 2478 down_read(&mm->mmap_sem); 2479 if (unlikely(khugepaged_test_exit(mm))) 2480 vma = NULL; 2481 else 2482 vma = find_vma(mm, khugepaged_scan.address); 2483 2484 progress++; 2485 for (; vma; vma = vma->vm_next) { 2486 unsigned long hstart, hend; 2487 2488 cond_resched(); 2489 if (unlikely(khugepaged_test_exit(mm))) { 2490 progress++; 2491 break; 2492 } 2493 if (!hugepage_vma_check(vma)) { 2494 skip: 2495 progress++; 2496 continue; 2497 } 2498 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2499 hend = vma->vm_end & HPAGE_PMD_MASK; 2500 if (hstart >= hend) 2501 goto skip; 2502 if (khugepaged_scan.address > hend) 2503 goto skip; 2504 if (khugepaged_scan.address < hstart) 2505 khugepaged_scan.address = hstart; 2506 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2507 2508 while (khugepaged_scan.address < hend) { 2509 int ret; 2510 cond_resched(); 2511 if (unlikely(khugepaged_test_exit(mm))) 2512 goto breakouterloop; 2513 2514 VM_BUG_ON(khugepaged_scan.address < hstart || 2515 khugepaged_scan.address + HPAGE_PMD_SIZE > 2516 hend); 2517 ret = khugepaged_scan_pmd(mm, vma, 2518 khugepaged_scan.address, 2519 hpage); 2520 /* move to next address */ 2521 khugepaged_scan.address += HPAGE_PMD_SIZE; 2522 progress += HPAGE_PMD_NR; 2523 if (ret) 2524 /* we released mmap_sem so break loop */ 2525 goto breakouterloop_mmap_sem; 2526 if (progress >= pages) 2527 goto breakouterloop; 2528 } 2529 } 2530 breakouterloop: 2531 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2532 breakouterloop_mmap_sem: 2533 
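	/*
	 * mmap_sem has already been dropped when we get here: either
	 * just above at breakouterloop, or inside collapse_huge_page()
	 * on the breakouterloop_mmap_sem path.
	 */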
2534 spin_lock(&khugepaged_mm_lock); 2535 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2536 /* 2537 * Release the current mm_slot if this mm is about to die, or 2538 * if we scanned all vmas of this mm. 2539 */ 2540 if (khugepaged_test_exit(mm) || !vma) { 2541 /* 2542 * Make sure that if mm_users is reaching zero while 2543 * khugepaged runs here, khugepaged_exit will find 2544 * mm_slot not pointing to the exiting mm. 2545 */ 2546 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2547 khugepaged_scan.mm_slot = list_entry( 2548 mm_slot->mm_node.next, 2549 struct mm_slot, mm_node); 2550 khugepaged_scan.address = 0; 2551 } else { 2552 khugepaged_scan.mm_slot = NULL; 2553 khugepaged_full_scans++; 2554 } 2555 2556 collect_mm_slot(mm_slot); 2557 } 2558 2559 return progress; 2560 } 2561 2562 static int khugepaged_has_work(void) 2563 { 2564 return !list_empty(&khugepaged_scan.mm_head) && 2565 khugepaged_enabled(); 2566 } 2567 2568 static int khugepaged_wait_event(void) 2569 { 2570 return !list_empty(&khugepaged_scan.mm_head) || 2571 kthread_should_stop(); 2572 } 2573 2574 static void khugepaged_do_scan(void) 2575 { 2576 struct page *hpage = NULL; 2577 unsigned int progress = 0, pass_through_head = 0; 2578 unsigned int pages = khugepaged_pages_to_scan; 2579 bool wait = true; 2580 2581 barrier(); /* write khugepaged_pages_to_scan to local stack */ 2582 2583 while (progress < pages) { 2584 if (!khugepaged_prealloc_page(&hpage, &wait)) 2585 break; 2586 2587 cond_resched(); 2588 2589 if (unlikely(kthread_should_stop() || freezing(current))) 2590 break; 2591 2592 spin_lock(&khugepaged_mm_lock); 2593 if (!khugepaged_scan.mm_slot) 2594 pass_through_head++; 2595 if (khugepaged_has_work() && 2596 pass_through_head < 2) 2597 progress += khugepaged_scan_mm_slot(pages - progress, 2598 &hpage); 2599 else 2600 progress = pages; 2601 spin_unlock(&khugepaged_mm_lock); 2602 } 2603 2604 if (!IS_ERR_OR_NULL(hpage)) 2605 put_page(hpage); 2606 } 2607 2608 static void khugepaged_wait_work(void) 2609 { 2610 try_to_freeze(); 2611 2612 if (khugepaged_has_work()) { 2613 if (!khugepaged_scan_sleep_millisecs) 2614 return; 2615 2616 wait_event_freezable_timeout(khugepaged_wait, 2617 kthread_should_stop(), 2618 msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 2619 return; 2620 } 2621 2622 if (khugepaged_enabled()) 2623 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 2624 } 2625 2626 static int khugepaged(void *none) 2627 { 2628 struct mm_slot *mm_slot; 2629 2630 set_freezable(); 2631 set_user_nice(current, 19); 2632 2633 while (!kthread_should_stop()) { 2634 khugepaged_do_scan(); 2635 khugepaged_wait_work(); 2636 } 2637 2638 spin_lock(&khugepaged_mm_lock); 2639 mm_slot = khugepaged_scan.mm_slot; 2640 khugepaged_scan.mm_slot = NULL; 2641 if (mm_slot) 2642 collect_mm_slot(mm_slot); 2643 spin_unlock(&khugepaged_mm_lock); 2644 return 0; 2645 } 2646 2647 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2648 unsigned long haddr, pmd_t *pmd) 2649 { 2650 struct mm_struct *mm = vma->vm_mm; 2651 pgtable_t pgtable; 2652 pmd_t _pmd; 2653 int i; 2654 2655 pmdp_clear_flush(vma, haddr, pmd); 2656 /* leave pmd empty until pte is filled */ 2657 2658 pgtable = pgtable_trans_huge_withdraw(mm); 2659 pmd_populate(mm, &_pmd, pgtable); 2660 2661 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2662 pte_t *pte, entry; 2663 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2664 entry = pte_mkspecial(entry); 2665 pte = pte_offset_map(&_pmd, haddr); 2666 VM_BUG_ON(!pte_none(*pte)); 2667 
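		/*
		 * Each of the HPAGE_PMD_NR ptes written here maps the small
		 * zero page with pte_special set, so userspace keeps reading
		 * zeroes exactly as it did through the huge zero page.
		 */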
set_pte_at(mm, haddr, pte, entry); 2668 pte_unmap(pte); 2669 } 2670 smp_wmb(); /* make pte visible before pmd */ 2671 pmd_populate(mm, pmd, pgtable); 2672 put_huge_zero_page(); 2673 } 2674 2675 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2676 pmd_t *pmd) 2677 { 2678 struct page *page; 2679 struct mm_struct *mm = vma->vm_mm; 2680 unsigned long haddr = address & HPAGE_PMD_MASK; 2681 unsigned long mmun_start; /* For mmu_notifiers */ 2682 unsigned long mmun_end; /* For mmu_notifiers */ 2683 2684 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); 2685 2686 mmun_start = haddr; 2687 mmun_end = haddr + HPAGE_PMD_SIZE; 2688 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2689 spin_lock(&mm->page_table_lock); 2690 if (unlikely(!pmd_trans_huge(*pmd))) { 2691 spin_unlock(&mm->page_table_lock); 2692 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2693 return; 2694 } 2695 if (is_huge_zero_pmd(*pmd)) { 2696 __split_huge_zero_page_pmd(vma, haddr, pmd); 2697 spin_unlock(&mm->page_table_lock); 2698 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2699 return; 2700 } 2701 page = pmd_page(*pmd); 2702 VM_BUG_ON(!page_count(page)); 2703 get_page(page); 2704 spin_unlock(&mm->page_table_lock); 2705 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2706 2707 split_huge_page(page); 2708 2709 put_page(page); 2710 BUG_ON(pmd_trans_huge(*pmd)); 2711 } 2712 2713 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 2714 pmd_t *pmd) 2715 { 2716 struct vm_area_struct *vma; 2717 2718 vma = find_vma(mm, address); 2719 BUG_ON(vma == NULL); 2720 split_huge_page_pmd(vma, address, pmd); 2721 } 2722 2723 static void split_huge_page_address(struct mm_struct *mm, 2724 unsigned long address) 2725 { 2726 pmd_t *pmd; 2727 2728 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2729 2730 pmd = mm_find_pmd(mm, address); 2731 if (!pmd) 2732 return; 2733 /* 2734 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2735 * materialize from under us. 2736 */ 2737 split_huge_page_pmd_mm(mm, address, pmd); 2738 } 2739 2740 void __vma_adjust_trans_huge(struct vm_area_struct *vma, 2741 unsigned long start, 2742 unsigned long end, 2743 long adjust_next) 2744 { 2745 /* 2746 * If the new start address isn't hpage aligned and it could 2747 * previously contain an hugepage: check if we need to split 2748 * an huge pmd. 2749 */ 2750 if (start & ~HPAGE_PMD_MASK && 2751 (start & HPAGE_PMD_MASK) >= vma->vm_start && 2752 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2753 split_huge_page_address(vma->vm_mm, start); 2754 2755 /* 2756 * If the new end address isn't hpage aligned and it could 2757 * previously contain an hugepage: check if we need to split 2758 * an huge pmd. 2759 */ 2760 if (end & ~HPAGE_PMD_MASK && 2761 (end & HPAGE_PMD_MASK) >= vma->vm_start && 2762 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2763 split_huge_page_address(vma->vm_mm, end); 2764 2765 /* 2766 * If we're also updating the vma->vm_next->vm_start, if the new 2767 * vm_next->vm_start isn't page aligned and it could previously 2768 * contain an hugepage: check if we need to split an huge pmd. 
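 * (adjust_next is expressed in pages, hence the "<< PAGE_SHIFT"
 * conversion below before the alignment checks.)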
2769 */ 2770 if (adjust_next > 0) { 2771 struct vm_area_struct *next = vma->vm_next; 2772 unsigned long nstart = next->vm_start; 2773 nstart += adjust_next << PAGE_SHIFT; 2774 if (nstart & ~HPAGE_PMD_MASK && 2775 (nstart & HPAGE_PMD_MASK) >= next->vm_start && 2776 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2777 split_huge_page_address(next->vm_mm, nstart); 2778 } 2779 } 2780
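/*
 * Illustrative userspace usage (not part of this file): with THP in
 * "madvise" mode, an application would typically request hugepage
 * backing for a large anonymous mapping along these lines:
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_HUGEPAGE);
 *
 * The madvise() call lands in hugepage_madvise() above, sets
 * VM_HUGEPAGE on the vma and, via khugepaged_enter_vma_merge(),
 * registers the mm so khugepaged can later collapse the region.
 */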