/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * enabled for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* by default scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default collapse a hugepage if at least one pte is mapped, just as
 * would have happened if the vma had been large enough at page fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
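 *
 * (Illustrative note, inferred from the fields above rather than taken from
 * the original comment: khugepaged walks @mm_head one mm_slot at a time and
 * remembers the next virtual address to look at in @address, so each wakeup
 * can resume exactly where the previous batch of khugepaged_pages_to_scan
 * pages stopped.)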
93 */ 94 struct khugepaged_scan { 95 struct list_head mm_head; 96 struct mm_slot *mm_slot; 97 unsigned long address; 98 }; 99 static struct khugepaged_scan khugepaged_scan = { 100 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 101 }; 102 103 104 static int set_recommended_min_free_kbytes(void) 105 { 106 struct zone *zone; 107 int nr_zones = 0; 108 unsigned long recommended_min; 109 110 if (!khugepaged_enabled()) 111 return 0; 112 113 for_each_populated_zone(zone) 114 nr_zones++; 115 116 /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */ 117 recommended_min = pageblock_nr_pages * nr_zones * 2; 118 119 /* 120 * Make sure that on average at least two pageblocks are almost free 121 * of another type, one for a migratetype to fall back to and a 122 * second to avoid subsequent fallbacks of other types There are 3 123 * MIGRATE_TYPES we care about. 124 */ 125 recommended_min += pageblock_nr_pages * nr_zones * 126 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; 127 128 /* don't ever allow to reserve more than 5% of the lowmem */ 129 recommended_min = min(recommended_min, 130 (unsigned long) nr_free_buffer_pages() / 20); 131 recommended_min <<= (PAGE_SHIFT-10); 132 133 if (recommended_min > min_free_kbytes) { 134 if (user_min_free_kbytes >= 0) 135 pr_info("raising min_free_kbytes from %d to %lu " 136 "to help transparent hugepage allocations\n", 137 min_free_kbytes, recommended_min); 138 139 min_free_kbytes = recommended_min; 140 } 141 setup_per_zone_wmarks(); 142 return 0; 143 } 144 late_initcall(set_recommended_min_free_kbytes); 145 146 static int start_khugepaged(void) 147 { 148 int err = 0; 149 if (khugepaged_enabled()) { 150 if (!khugepaged_thread) 151 khugepaged_thread = kthread_run(khugepaged, NULL, 152 "khugepaged"); 153 if (unlikely(IS_ERR(khugepaged_thread))) { 154 printk(KERN_ERR 155 "khugepaged: kthread_run(khugepaged) failed\n"); 156 err = PTR_ERR(khugepaged_thread); 157 khugepaged_thread = NULL; 158 } 159 160 if (!list_empty(&khugepaged_scan.mm_head)) 161 wake_up_interruptible(&khugepaged_wait); 162 163 set_recommended_min_free_kbytes(); 164 } else if (khugepaged_thread) { 165 kthread_stop(khugepaged_thread); 166 khugepaged_thread = NULL; 167 } 168 169 return err; 170 } 171 172 static atomic_t huge_zero_refcount; 173 static struct page *huge_zero_page __read_mostly; 174 175 static inline bool is_huge_zero_page(struct page *page) 176 { 177 return ACCESS_ONCE(huge_zero_page) == page; 178 } 179 180 static inline bool is_huge_zero_pmd(pmd_t pmd) 181 { 182 return is_huge_zero_page(pmd_page(pmd)); 183 } 184 185 static struct page *get_huge_zero_page(void) 186 { 187 struct page *zero_page; 188 retry: 189 if (likely(atomic_inc_not_zero(&huge_zero_refcount))) 190 return ACCESS_ONCE(huge_zero_page); 191 192 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, 193 HPAGE_PMD_ORDER); 194 if (!zero_page) { 195 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); 196 return NULL; 197 } 198 count_vm_event(THP_ZERO_PAGE_ALLOC); 199 preempt_disable(); 200 if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 201 preempt_enable(); 202 __free_page(zero_page); 203 goto retry; 204 } 205 206 /* We take additional reference here. It will be put back by shrinker */ 207 atomic_set(&huge_zero_refcount, 2); 208 preempt_enable(); 209 return ACCESS_ONCE(huge_zero_page); 210 } 211 212 static void put_huge_zero_page(void) 213 { 214 /* 215 * Counter should never go to zero here. Only shrinker can put 216 * last reference. 
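 *
 * (Illustrative summary of the refcounting scheme, based on the code above:
 * get_huge_zero_page() sets the count to 2 on first allocation, one
 * reference for the caller plus one that only the shrinker releases, so as
 * long as any user holds a reference the count stays above 1 and
 * shrink_huge_zero_page_scan() leaves the page alone.)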
217 */ 218 BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); 219 } 220 221 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, 222 struct shrink_control *sc) 223 { 224 /* we can free zero page only if last reference remains */ 225 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; 226 } 227 228 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, 229 struct shrink_control *sc) 230 { 231 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 232 struct page *zero_page = xchg(&huge_zero_page, NULL); 233 BUG_ON(zero_page == NULL); 234 __free_page(zero_page); 235 return HPAGE_PMD_NR; 236 } 237 238 return 0; 239 } 240 241 static struct shrinker huge_zero_page_shrinker = { 242 .count_objects = shrink_huge_zero_page_count, 243 .scan_objects = shrink_huge_zero_page_scan, 244 .seeks = DEFAULT_SEEKS, 245 }; 246 247 #ifdef CONFIG_SYSFS 248 249 static ssize_t double_flag_show(struct kobject *kobj, 250 struct kobj_attribute *attr, char *buf, 251 enum transparent_hugepage_flag enabled, 252 enum transparent_hugepage_flag req_madv) 253 { 254 if (test_bit(enabled, &transparent_hugepage_flags)) { 255 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags)); 256 return sprintf(buf, "[always] madvise never\n"); 257 } else if (test_bit(req_madv, &transparent_hugepage_flags)) 258 return sprintf(buf, "always [madvise] never\n"); 259 else 260 return sprintf(buf, "always madvise [never]\n"); 261 } 262 static ssize_t double_flag_store(struct kobject *kobj, 263 struct kobj_attribute *attr, 264 const char *buf, size_t count, 265 enum transparent_hugepage_flag enabled, 266 enum transparent_hugepage_flag req_madv) 267 { 268 if (!memcmp("always", buf, 269 min(sizeof("always")-1, count))) { 270 set_bit(enabled, &transparent_hugepage_flags); 271 clear_bit(req_madv, &transparent_hugepage_flags); 272 } else if (!memcmp("madvise", buf, 273 min(sizeof("madvise")-1, count))) { 274 clear_bit(enabled, &transparent_hugepage_flags); 275 set_bit(req_madv, &transparent_hugepage_flags); 276 } else if (!memcmp("never", buf, 277 min(sizeof("never")-1, count))) { 278 clear_bit(enabled, &transparent_hugepage_flags); 279 clear_bit(req_madv, &transparent_hugepage_flags); 280 } else 281 return -EINVAL; 282 283 return count; 284 } 285 286 static ssize_t enabled_show(struct kobject *kobj, 287 struct kobj_attribute *attr, char *buf) 288 { 289 return double_flag_show(kobj, attr, buf, 290 TRANSPARENT_HUGEPAGE_FLAG, 291 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 292 } 293 static ssize_t enabled_store(struct kobject *kobj, 294 struct kobj_attribute *attr, 295 const char *buf, size_t count) 296 { 297 ssize_t ret; 298 299 ret = double_flag_store(kobj, attr, buf, count, 300 TRANSPARENT_HUGEPAGE_FLAG, 301 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); 302 303 if (ret > 0) { 304 int err; 305 306 mutex_lock(&khugepaged_mutex); 307 err = start_khugepaged(); 308 mutex_unlock(&khugepaged_mutex); 309 310 if (err) 311 ret = err; 312 } 313 314 return ret; 315 } 316 static struct kobj_attribute enabled_attr = 317 __ATTR(enabled, 0644, enabled_show, enabled_store); 318 319 static ssize_t single_flag_show(struct kobject *kobj, 320 struct kobj_attribute *attr, char *buf, 321 enum transparent_hugepage_flag flag) 322 { 323 return sprintf(buf, "%d\n", 324 !!test_bit(flag, &transparent_hugepage_flags)); 325 } 326 327 static ssize_t single_flag_store(struct kobject *kobj, 328 struct kobj_attribute *attr, 329 const char *buf, size_t count, 330 enum transparent_hugepage_flag flag) 331 { 332 unsigned long value; 333 int ret; 334 
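	/*
	 * Illustrative usage, assuming the "transparent_hugepage" kobject
	 * created later in this file sits at the usual /sys/kernel/mm path:
	 *
	 *	echo 1 > /sys/kernel/mm/transparent_hugepage/use_zero_page
	 *	echo 0 > /sys/kernel/mm/transparent_hugepage/use_zero_page
	 *
	 * The value is parsed as base 10, and anything other than 0 or 1 is
	 * rejected with -EINVAL below.
	 */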
335 ret = kstrtoul(buf, 10, &value); 336 if (ret < 0) 337 return ret; 338 if (value > 1) 339 return -EINVAL; 340 341 if (value) 342 set_bit(flag, &transparent_hugepage_flags); 343 else 344 clear_bit(flag, &transparent_hugepage_flags); 345 346 return count; 347 } 348 349 /* 350 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind 351 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of 352 * memory just to allocate one more hugepage. 353 */ 354 static ssize_t defrag_show(struct kobject *kobj, 355 struct kobj_attribute *attr, char *buf) 356 { 357 return double_flag_show(kobj, attr, buf, 358 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 359 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 360 } 361 static ssize_t defrag_store(struct kobject *kobj, 362 struct kobj_attribute *attr, 363 const char *buf, size_t count) 364 { 365 return double_flag_store(kobj, attr, buf, count, 366 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, 367 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG); 368 } 369 static struct kobj_attribute defrag_attr = 370 __ATTR(defrag, 0644, defrag_show, defrag_store); 371 372 static ssize_t use_zero_page_show(struct kobject *kobj, 373 struct kobj_attribute *attr, char *buf) 374 { 375 return single_flag_show(kobj, attr, buf, 376 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 377 } 378 static ssize_t use_zero_page_store(struct kobject *kobj, 379 struct kobj_attribute *attr, const char *buf, size_t count) 380 { 381 return single_flag_store(kobj, attr, buf, count, 382 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); 383 } 384 static struct kobj_attribute use_zero_page_attr = 385 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store); 386 #ifdef CONFIG_DEBUG_VM 387 static ssize_t debug_cow_show(struct kobject *kobj, 388 struct kobj_attribute *attr, char *buf) 389 { 390 return single_flag_show(kobj, attr, buf, 391 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 392 } 393 static ssize_t debug_cow_store(struct kobject *kobj, 394 struct kobj_attribute *attr, 395 const char *buf, size_t count) 396 { 397 return single_flag_store(kobj, attr, buf, count, 398 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); 399 } 400 static struct kobj_attribute debug_cow_attr = 401 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store); 402 #endif /* CONFIG_DEBUG_VM */ 403 404 static struct attribute *hugepage_attr[] = { 405 &enabled_attr.attr, 406 &defrag_attr.attr, 407 &use_zero_page_attr.attr, 408 #ifdef CONFIG_DEBUG_VM 409 &debug_cow_attr.attr, 410 #endif 411 NULL, 412 }; 413 414 static struct attribute_group hugepage_attr_group = { 415 .attrs = hugepage_attr, 416 }; 417 418 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 419 struct kobj_attribute *attr, 420 char *buf) 421 { 422 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); 423 } 424 425 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, 426 struct kobj_attribute *attr, 427 const char *buf, size_t count) 428 { 429 unsigned long msecs; 430 int err; 431 432 err = kstrtoul(buf, 10, &msecs); 433 if (err || msecs > UINT_MAX) 434 return -EINVAL; 435 436 khugepaged_scan_sleep_millisecs = msecs; 437 wake_up_interruptible(&khugepaged_wait); 438 439 return count; 440 } 441 static struct kobj_attribute scan_sleep_millisecs_attr = 442 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, 443 scan_sleep_millisecs_store); 444 445 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, 446 struct kobj_attribute *attr, 447 char *buf) 448 { 449 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); 450 } 451 452 static ssize_t 
alloc_sleep_millisecs_store(struct kobject *kobj, 453 struct kobj_attribute *attr, 454 const char *buf, size_t count) 455 { 456 unsigned long msecs; 457 int err; 458 459 err = kstrtoul(buf, 10, &msecs); 460 if (err || msecs > UINT_MAX) 461 return -EINVAL; 462 463 khugepaged_alloc_sleep_millisecs = msecs; 464 wake_up_interruptible(&khugepaged_wait); 465 466 return count; 467 } 468 static struct kobj_attribute alloc_sleep_millisecs_attr = 469 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, 470 alloc_sleep_millisecs_store); 471 472 static ssize_t pages_to_scan_show(struct kobject *kobj, 473 struct kobj_attribute *attr, 474 char *buf) 475 { 476 return sprintf(buf, "%u\n", khugepaged_pages_to_scan); 477 } 478 static ssize_t pages_to_scan_store(struct kobject *kobj, 479 struct kobj_attribute *attr, 480 const char *buf, size_t count) 481 { 482 int err; 483 unsigned long pages; 484 485 err = kstrtoul(buf, 10, &pages); 486 if (err || !pages || pages > UINT_MAX) 487 return -EINVAL; 488 489 khugepaged_pages_to_scan = pages; 490 491 return count; 492 } 493 static struct kobj_attribute pages_to_scan_attr = 494 __ATTR(pages_to_scan, 0644, pages_to_scan_show, 495 pages_to_scan_store); 496 497 static ssize_t pages_collapsed_show(struct kobject *kobj, 498 struct kobj_attribute *attr, 499 char *buf) 500 { 501 return sprintf(buf, "%u\n", khugepaged_pages_collapsed); 502 } 503 static struct kobj_attribute pages_collapsed_attr = 504 __ATTR_RO(pages_collapsed); 505 506 static ssize_t full_scans_show(struct kobject *kobj, 507 struct kobj_attribute *attr, 508 char *buf) 509 { 510 return sprintf(buf, "%u\n", khugepaged_full_scans); 511 } 512 static struct kobj_attribute full_scans_attr = 513 __ATTR_RO(full_scans); 514 515 static ssize_t khugepaged_defrag_show(struct kobject *kobj, 516 struct kobj_attribute *attr, char *buf) 517 { 518 return single_flag_show(kobj, attr, buf, 519 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 520 } 521 static ssize_t khugepaged_defrag_store(struct kobject *kobj, 522 struct kobj_attribute *attr, 523 const char *buf, size_t count) 524 { 525 return single_flag_store(kobj, attr, buf, count, 526 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); 527 } 528 static struct kobj_attribute khugepaged_defrag_attr = 529 __ATTR(defrag, 0644, khugepaged_defrag_show, 530 khugepaged_defrag_store); 531 532 /* 533 * max_ptes_none controls if khugepaged should collapse hugepages over 534 * any unmapped ptes in turn potentially increasing the memory 535 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not 536 * reduce the available free memory in the system as it 537 * runs. Increasing max_ptes_none will instead potentially reduce the 538 * free memory in the system during the khugepaged scan. 
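 *
 * (Illustrative example, assuming a 2MB hugepage built from 512 4K ptes so
 * that HPAGE_PMD_NR == 512: the default of 511 lets khugepaged collapse a
 * range with a single populated pte, charging up to 511 previously unmapped
 * pages to the mm, while a value of 0 only collapses ranges whose 512 ptes
 * are all already mapped.)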
539 */ 540 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, 541 struct kobj_attribute *attr, 542 char *buf) 543 { 544 return sprintf(buf, "%u\n", khugepaged_max_ptes_none); 545 } 546 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, 547 struct kobj_attribute *attr, 548 const char *buf, size_t count) 549 { 550 int err; 551 unsigned long max_ptes_none; 552 553 err = kstrtoul(buf, 10, &max_ptes_none); 554 if (err || max_ptes_none > HPAGE_PMD_NR-1) 555 return -EINVAL; 556 557 khugepaged_max_ptes_none = max_ptes_none; 558 559 return count; 560 } 561 static struct kobj_attribute khugepaged_max_ptes_none_attr = 562 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, 563 khugepaged_max_ptes_none_store); 564 565 static struct attribute *khugepaged_attr[] = { 566 &khugepaged_defrag_attr.attr, 567 &khugepaged_max_ptes_none_attr.attr, 568 &pages_to_scan_attr.attr, 569 &pages_collapsed_attr.attr, 570 &full_scans_attr.attr, 571 &scan_sleep_millisecs_attr.attr, 572 &alloc_sleep_millisecs_attr.attr, 573 NULL, 574 }; 575 576 static struct attribute_group khugepaged_attr_group = { 577 .attrs = khugepaged_attr, 578 .name = "khugepaged", 579 }; 580 581 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) 582 { 583 int err; 584 585 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); 586 if (unlikely(!*hugepage_kobj)) { 587 printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n"); 588 return -ENOMEM; 589 } 590 591 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); 592 if (err) { 593 printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n"); 594 goto delete_obj; 595 } 596 597 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); 598 if (err) { 599 printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n"); 600 goto remove_hp_group; 601 } 602 603 return 0; 604 605 remove_hp_group: 606 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); 607 delete_obj: 608 kobject_put(*hugepage_kobj); 609 return err; 610 } 611 612 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 613 { 614 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); 615 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); 616 kobject_put(hugepage_kobj); 617 } 618 #else 619 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) 620 { 621 return 0; 622 } 623 624 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) 625 { 626 } 627 #endif /* CONFIG_SYSFS */ 628 629 static int __init hugepage_init(void) 630 { 631 int err; 632 struct kobject *hugepage_kobj; 633 634 if (!has_transparent_hugepage()) { 635 transparent_hugepage_flags = 0; 636 return -EINVAL; 637 } 638 639 err = hugepage_init_sysfs(&hugepage_kobj); 640 if (err) 641 return err; 642 643 err = khugepaged_slab_init(); 644 if (err) 645 goto out; 646 647 register_shrinker(&huge_zero_page_shrinker); 648 649 /* 650 * By default disable transparent hugepages on smaller systems, 651 * where the extra memory used could hurt more than TLB overhead 652 * is likely to save. The admin can still enable it through /sys. 
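 *
 * (Illustrative note: 512 << (20 - PAGE_SHIFT) below is the number of base
 * pages in 512MB, so a machine with less RAM than that boots with every THP
 * flag cleared. Assuming the sysfs group registered by hugepage_init_sysfs()
 * is at the usual place, an admin can re-enable it with:
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled)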
653 */ 654 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) 655 transparent_hugepage_flags = 0; 656 657 start_khugepaged(); 658 659 return 0; 660 out: 661 hugepage_exit_sysfs(hugepage_kobj); 662 return err; 663 } 664 subsys_initcall(hugepage_init); 665 666 static int __init setup_transparent_hugepage(char *str) 667 { 668 int ret = 0; 669 if (!str) 670 goto out; 671 if (!strcmp(str, "always")) { 672 set_bit(TRANSPARENT_HUGEPAGE_FLAG, 673 &transparent_hugepage_flags); 674 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 675 &transparent_hugepage_flags); 676 ret = 1; 677 } else if (!strcmp(str, "madvise")) { 678 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 679 &transparent_hugepage_flags); 680 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 681 &transparent_hugepage_flags); 682 ret = 1; 683 } else if (!strcmp(str, "never")) { 684 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, 685 &transparent_hugepage_flags); 686 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 687 &transparent_hugepage_flags); 688 ret = 1; 689 } 690 out: 691 if (!ret) 692 printk(KERN_WARNING 693 "transparent_hugepage= cannot parse, ignored\n"); 694 return ret; 695 } 696 __setup("transparent_hugepage=", setup_transparent_hugepage); 697 698 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 699 { 700 if (likely(vma->vm_flags & VM_WRITE)) 701 pmd = pmd_mkwrite(pmd); 702 return pmd; 703 } 704 705 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) 706 { 707 pmd_t entry; 708 entry = mk_pmd(page, prot); 709 entry = pmd_mkhuge(entry); 710 return entry; 711 } 712 713 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, 714 struct vm_area_struct *vma, 715 unsigned long haddr, pmd_t *pmd, 716 struct page *page) 717 { 718 pgtable_t pgtable; 719 spinlock_t *ptl; 720 721 VM_BUG_ON_PAGE(!PageCompound(page), page); 722 pgtable = pte_alloc_one(mm, haddr); 723 if (unlikely(!pgtable)) 724 return VM_FAULT_OOM; 725 726 clear_huge_page(page, haddr, HPAGE_PMD_NR); 727 /* 728 * The memory barrier inside __SetPageUptodate makes sure that 729 * clear_huge_page writes become visible before the set_pmd_at() 730 * write. 731 */ 732 __SetPageUptodate(page); 733 734 ptl = pmd_lock(mm, pmd); 735 if (unlikely(!pmd_none(*pmd))) { 736 spin_unlock(ptl); 737 mem_cgroup_uncharge_page(page); 738 put_page(page); 739 pte_free(mm, pgtable); 740 } else { 741 pmd_t entry; 742 entry = mk_huge_pmd(page, vma->vm_page_prot); 743 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 744 page_add_new_anon_rmap(page, vma, haddr); 745 pgtable_trans_huge_deposit(mm, pmd, pgtable); 746 set_pmd_at(mm, haddr, pmd, entry); 747 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 748 atomic_long_inc(&mm->nr_ptes); 749 spin_unlock(ptl); 750 } 751 752 return 0; 753 } 754 755 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) 756 { 757 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; 758 } 759 760 static inline struct page *alloc_hugepage_vma(int defrag, 761 struct vm_area_struct *vma, 762 unsigned long haddr, int nd, 763 gfp_t extra_gfp) 764 { 765 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp), 766 HPAGE_PMD_ORDER, vma, haddr, nd); 767 } 768 769 /* Caller must hold page table lock. 
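 * (Illustrative note on the return value: false means another thread already
 * installed something at *pmd; callers such as do_huge_pmd_anonymous_page()
 * below respond by freeing the preallocated pgtable and dropping the huge
 * zero page reference they took.)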
*/ 770 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, 771 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, 772 struct page *zero_page) 773 { 774 pmd_t entry; 775 if (!pmd_none(*pmd)) 776 return false; 777 entry = mk_pmd(zero_page, vma->vm_page_prot); 778 entry = pmd_wrprotect(entry); 779 entry = pmd_mkhuge(entry); 780 pgtable_trans_huge_deposit(mm, pmd, pgtable); 781 set_pmd_at(mm, haddr, pmd, entry); 782 atomic_long_inc(&mm->nr_ptes); 783 return true; 784 } 785 786 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 787 unsigned long address, pmd_t *pmd, 788 unsigned int flags) 789 { 790 struct page *page; 791 unsigned long haddr = address & HPAGE_PMD_MASK; 792 793 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) 794 return VM_FAULT_FALLBACK; 795 if (unlikely(anon_vma_prepare(vma))) 796 return VM_FAULT_OOM; 797 if (unlikely(khugepaged_enter(vma))) 798 return VM_FAULT_OOM; 799 if (!(flags & FAULT_FLAG_WRITE) && 800 transparent_hugepage_use_zero_page()) { 801 spinlock_t *ptl; 802 pgtable_t pgtable; 803 struct page *zero_page; 804 bool set; 805 pgtable = pte_alloc_one(mm, haddr); 806 if (unlikely(!pgtable)) 807 return VM_FAULT_OOM; 808 zero_page = get_huge_zero_page(); 809 if (unlikely(!zero_page)) { 810 pte_free(mm, pgtable); 811 count_vm_event(THP_FAULT_FALLBACK); 812 return VM_FAULT_FALLBACK; 813 } 814 ptl = pmd_lock(mm, pmd); 815 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, 816 zero_page); 817 spin_unlock(ptl); 818 if (!set) { 819 pte_free(mm, pgtable); 820 put_huge_zero_page(); 821 } 822 return 0; 823 } 824 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 825 vma, haddr, numa_node_id(), 0); 826 if (unlikely(!page)) { 827 count_vm_event(THP_FAULT_FALLBACK); 828 return VM_FAULT_FALLBACK; 829 } 830 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 831 put_page(page); 832 count_vm_event(THP_FAULT_FALLBACK); 833 return VM_FAULT_FALLBACK; 834 } 835 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) { 836 mem_cgroup_uncharge_page(page); 837 put_page(page); 838 count_vm_event(THP_FAULT_FALLBACK); 839 return VM_FAULT_FALLBACK; 840 } 841 842 count_vm_event(THP_FAULT_ALLOC); 843 return 0; 844 } 845 846 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 847 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 848 struct vm_area_struct *vma) 849 { 850 spinlock_t *dst_ptl, *src_ptl; 851 struct page *src_page; 852 pmd_t pmd; 853 pgtable_t pgtable; 854 int ret; 855 856 ret = -ENOMEM; 857 pgtable = pte_alloc_one(dst_mm, addr); 858 if (unlikely(!pgtable)) 859 goto out; 860 861 dst_ptl = pmd_lock(dst_mm, dst_pmd); 862 src_ptl = pmd_lockptr(src_mm, src_pmd); 863 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 864 865 ret = -EAGAIN; 866 pmd = *src_pmd; 867 if (unlikely(!pmd_trans_huge(pmd))) { 868 pte_free(dst_mm, pgtable); 869 goto out_unlock; 870 } 871 /* 872 * When page table lock is held, the huge zero pmd should not be 873 * under splitting since we don't split the page itself, only pmd to 874 * a page table. 875 */ 876 if (is_huge_zero_pmd(pmd)) { 877 struct page *zero_page; 878 bool set; 879 /* 880 * get_huge_zero_page() will never allocate a new page here, 881 * since we already have a zero page to copy. It just takes a 882 * reference. 
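		 * (Illustrative reasoning, not in the original comment: the
		 * source pmd maps the huge zero page and therefore already
		 * holds a reference, so the atomic_inc_not_zero() fast path
		 * in get_huge_zero_page() should always succeed here without
		 * calling into the page allocator.)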
883 */ 884 zero_page = get_huge_zero_page(); 885 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, 886 zero_page); 887 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */ 888 ret = 0; 889 goto out_unlock; 890 } 891 892 if (unlikely(pmd_trans_splitting(pmd))) { 893 /* split huge page running from under us */ 894 spin_unlock(src_ptl); 895 spin_unlock(dst_ptl); 896 pte_free(dst_mm, pgtable); 897 898 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ 899 goto out; 900 } 901 src_page = pmd_page(pmd); 902 VM_BUG_ON_PAGE(!PageHead(src_page), src_page); 903 get_page(src_page); 904 page_dup_rmap(src_page); 905 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); 906 907 pmdp_set_wrprotect(src_mm, addr, src_pmd); 908 pmd = pmd_mkold(pmd_wrprotect(pmd)); 909 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); 910 set_pmd_at(dst_mm, addr, dst_pmd, pmd); 911 atomic_long_inc(&dst_mm->nr_ptes); 912 913 ret = 0; 914 out_unlock: 915 spin_unlock(src_ptl); 916 spin_unlock(dst_ptl); 917 out: 918 return ret; 919 } 920 921 void huge_pmd_set_accessed(struct mm_struct *mm, 922 struct vm_area_struct *vma, 923 unsigned long address, 924 pmd_t *pmd, pmd_t orig_pmd, 925 int dirty) 926 { 927 spinlock_t *ptl; 928 pmd_t entry; 929 unsigned long haddr; 930 931 ptl = pmd_lock(mm, pmd); 932 if (unlikely(!pmd_same(*pmd, orig_pmd))) 933 goto unlock; 934 935 entry = pmd_mkyoung(orig_pmd); 936 haddr = address & HPAGE_PMD_MASK; 937 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) 938 update_mmu_cache_pmd(vma, address, pmd); 939 940 unlock: 941 spin_unlock(ptl); 942 } 943 944 static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm, 945 struct vm_area_struct *vma, unsigned long address, 946 pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr) 947 { 948 spinlock_t *ptl; 949 pgtable_t pgtable; 950 pmd_t _pmd; 951 struct page *page; 952 int i, ret = 0; 953 unsigned long mmun_start; /* For mmu_notifiers */ 954 unsigned long mmun_end; /* For mmu_notifiers */ 955 956 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 957 if (!page) { 958 ret |= VM_FAULT_OOM; 959 goto out; 960 } 961 962 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { 963 put_page(page); 964 ret |= VM_FAULT_OOM; 965 goto out; 966 } 967 968 clear_user_highpage(page, address); 969 __SetPageUptodate(page); 970 971 mmun_start = haddr; 972 mmun_end = haddr + HPAGE_PMD_SIZE; 973 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 974 975 ptl = pmd_lock(mm, pmd); 976 if (unlikely(!pmd_same(*pmd, orig_pmd))) 977 goto out_free_page; 978 979 pmdp_clear_flush(vma, haddr, pmd); 980 /* leave pmd empty until pte is filled */ 981 982 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 983 pmd_populate(mm, &_pmd, pgtable); 984 985 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 986 pte_t *pte, entry; 987 if (haddr == (address & PAGE_MASK)) { 988 entry = mk_pte(page, vma->vm_page_prot); 989 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 990 page_add_new_anon_rmap(page, vma, haddr); 991 } else { 992 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 993 entry = pte_mkspecial(entry); 994 } 995 pte = pte_offset_map(&_pmd, haddr); 996 VM_BUG_ON(!pte_none(*pte)); 997 set_pte_at(mm, haddr, pte, entry); 998 pte_unmap(pte); 999 } 1000 smp_wmb(); /* make pte visible before pmd */ 1001 pmd_populate(mm, pmd, pgtable); 1002 spin_unlock(ptl); 1003 put_huge_zero_page(); 1004 inc_mm_counter(mm, MM_ANONPAGES); 1005 1006 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1007 1008 ret |= VM_FAULT_WRITE; 1009 out: 1010 
return ret; 1011 out_free_page: 1012 spin_unlock(ptl); 1013 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1014 mem_cgroup_uncharge_page(page); 1015 put_page(page); 1016 goto out; 1017 } 1018 1019 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, 1020 struct vm_area_struct *vma, 1021 unsigned long address, 1022 pmd_t *pmd, pmd_t orig_pmd, 1023 struct page *page, 1024 unsigned long haddr) 1025 { 1026 spinlock_t *ptl; 1027 pgtable_t pgtable; 1028 pmd_t _pmd; 1029 int ret = 0, i; 1030 struct page **pages; 1031 unsigned long mmun_start; /* For mmu_notifiers */ 1032 unsigned long mmun_end; /* For mmu_notifiers */ 1033 1034 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, 1035 GFP_KERNEL); 1036 if (unlikely(!pages)) { 1037 ret |= VM_FAULT_OOM; 1038 goto out; 1039 } 1040 1041 for (i = 0; i < HPAGE_PMD_NR; i++) { 1042 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | 1043 __GFP_OTHER_NODE, 1044 vma, address, page_to_nid(page)); 1045 if (unlikely(!pages[i] || 1046 mem_cgroup_newpage_charge(pages[i], mm, 1047 GFP_KERNEL))) { 1048 if (pages[i]) 1049 put_page(pages[i]); 1050 mem_cgroup_uncharge_start(); 1051 while (--i >= 0) { 1052 mem_cgroup_uncharge_page(pages[i]); 1053 put_page(pages[i]); 1054 } 1055 mem_cgroup_uncharge_end(); 1056 kfree(pages); 1057 ret |= VM_FAULT_OOM; 1058 goto out; 1059 } 1060 } 1061 1062 for (i = 0; i < HPAGE_PMD_NR; i++) { 1063 copy_user_highpage(pages[i], page + i, 1064 haddr + PAGE_SIZE * i, vma); 1065 __SetPageUptodate(pages[i]); 1066 cond_resched(); 1067 } 1068 1069 mmun_start = haddr; 1070 mmun_end = haddr + HPAGE_PMD_SIZE; 1071 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1072 1073 ptl = pmd_lock(mm, pmd); 1074 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1075 goto out_free_pages; 1076 VM_BUG_ON_PAGE(!PageHead(page), page); 1077 1078 pmdp_clear_flush(vma, haddr, pmd); 1079 /* leave pmd empty until pte is filled */ 1080 1081 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1082 pmd_populate(mm, &_pmd, pgtable); 1083 1084 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1085 pte_t *pte, entry; 1086 entry = mk_pte(pages[i], vma->vm_page_prot); 1087 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1088 page_add_new_anon_rmap(pages[i], vma, haddr); 1089 pte = pte_offset_map(&_pmd, haddr); 1090 VM_BUG_ON(!pte_none(*pte)); 1091 set_pte_at(mm, haddr, pte, entry); 1092 pte_unmap(pte); 1093 } 1094 kfree(pages); 1095 1096 smp_wmb(); /* make pte visible before pmd */ 1097 pmd_populate(mm, pmd, pgtable); 1098 page_remove_rmap(page); 1099 spin_unlock(ptl); 1100 1101 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1102 1103 ret |= VM_FAULT_WRITE; 1104 put_page(page); 1105 1106 out: 1107 return ret; 1108 1109 out_free_pages: 1110 spin_unlock(ptl); 1111 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1112 mem_cgroup_uncharge_start(); 1113 for (i = 0; i < HPAGE_PMD_NR; i++) { 1114 mem_cgroup_uncharge_page(pages[i]); 1115 put_page(pages[i]); 1116 } 1117 mem_cgroup_uncharge_end(); 1118 kfree(pages); 1119 goto out; 1120 } 1121 1122 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 1123 unsigned long address, pmd_t *pmd, pmd_t orig_pmd) 1124 { 1125 spinlock_t *ptl; 1126 int ret = 0; 1127 struct page *page = NULL, *new_page; 1128 unsigned long haddr; 1129 unsigned long mmun_start; /* For mmu_notifiers */ 1130 unsigned long mmun_end; /* For mmu_notifiers */ 1131 1132 ptl = pmd_lockptr(mm, pmd); 1133 VM_BUG_ON(!vma->anon_vma); 1134 haddr = address & HPAGE_PMD_MASK; 1135 if 
(is_huge_zero_pmd(orig_pmd)) 1136 goto alloc; 1137 spin_lock(ptl); 1138 if (unlikely(!pmd_same(*pmd, orig_pmd))) 1139 goto out_unlock; 1140 1141 page = pmd_page(orig_pmd); 1142 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); 1143 if (page_mapcount(page) == 1) { 1144 pmd_t entry; 1145 entry = pmd_mkyoung(orig_pmd); 1146 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1147 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) 1148 update_mmu_cache_pmd(vma, address, pmd); 1149 ret |= VM_FAULT_WRITE; 1150 goto out_unlock; 1151 } 1152 get_page(page); 1153 spin_unlock(ptl); 1154 alloc: 1155 if (transparent_hugepage_enabled(vma) && 1156 !transparent_hugepage_debug_cow()) 1157 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 1158 vma, haddr, numa_node_id(), 0); 1159 else 1160 new_page = NULL; 1161 1162 if (unlikely(!new_page)) { 1163 if (!page) { 1164 ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, 1165 address, pmd, orig_pmd, haddr); 1166 } else { 1167 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 1168 pmd, orig_pmd, page, haddr); 1169 if (ret & VM_FAULT_OOM) { 1170 split_huge_page(page); 1171 ret |= VM_FAULT_FALLBACK; 1172 } 1173 put_page(page); 1174 } 1175 count_vm_event(THP_FAULT_FALLBACK); 1176 goto out; 1177 } 1178 1179 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1180 put_page(new_page); 1181 if (page) { 1182 split_huge_page(page); 1183 put_page(page); 1184 } else 1185 split_huge_page_pmd(vma, address, pmd); 1186 ret |= VM_FAULT_FALLBACK; 1187 count_vm_event(THP_FAULT_FALLBACK); 1188 goto out; 1189 } 1190 1191 count_vm_event(THP_FAULT_ALLOC); 1192 1193 if (!page) 1194 clear_huge_page(new_page, haddr, HPAGE_PMD_NR); 1195 else 1196 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); 1197 __SetPageUptodate(new_page); 1198 1199 mmun_start = haddr; 1200 mmun_end = haddr + HPAGE_PMD_SIZE; 1201 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1202 1203 spin_lock(ptl); 1204 if (page) 1205 put_page(page); 1206 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1207 spin_unlock(ptl); 1208 mem_cgroup_uncharge_page(new_page); 1209 put_page(new_page); 1210 goto out_mn; 1211 } else { 1212 pmd_t entry; 1213 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1214 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1215 pmdp_clear_flush(vma, haddr, pmd); 1216 page_add_new_anon_rmap(new_page, vma, haddr); 1217 set_pmd_at(mm, haddr, pmd, entry); 1218 update_mmu_cache_pmd(vma, address, pmd); 1219 if (!page) { 1220 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); 1221 put_huge_zero_page(); 1222 } else { 1223 VM_BUG_ON_PAGE(!PageHead(page), page); 1224 page_remove_rmap(page); 1225 put_page(page); 1226 } 1227 ret |= VM_FAULT_WRITE; 1228 } 1229 spin_unlock(ptl); 1230 out_mn: 1231 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1232 out: 1233 return ret; 1234 out_unlock: 1235 spin_unlock(ptl); 1236 return ret; 1237 } 1238 1239 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1240 unsigned long addr, 1241 pmd_t *pmd, 1242 unsigned int flags) 1243 { 1244 struct mm_struct *mm = vma->vm_mm; 1245 struct page *page = NULL; 1246 1247 assert_spin_locked(pmd_lockptr(mm, pmd)); 1248 1249 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1250 goto out; 1251 1252 /* Avoid dumping huge zero page */ 1253 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1254 return ERR_PTR(-EFAULT); 1255 1256 /* Full NUMA hinting faults to serialise migration in fault paths */ 1257 if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) 1258 goto out; 1259 
1260 page = pmd_page(*pmd); 1261 VM_BUG_ON_PAGE(!PageHead(page), page); 1262 if (flags & FOLL_TOUCH) { 1263 pmd_t _pmd; 1264 /* 1265 * We should set the dirty bit only for FOLL_WRITE but 1266 * for now the dirty bit in the pmd is meaningless. 1267 * And if the dirty bit will become meaningful and 1268 * we'll only set it with FOLL_WRITE, an atomic 1269 * set_bit will be required on the pmd to set the 1270 * young bit, instead of the current set_pmd_at. 1271 */ 1272 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); 1273 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, 1274 pmd, _pmd, 1)) 1275 update_mmu_cache_pmd(vma, addr, pmd); 1276 } 1277 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1278 if (page->mapping && trylock_page(page)) { 1279 lru_add_drain(); 1280 if (page->mapping) 1281 mlock_vma_page(page); 1282 unlock_page(page); 1283 } 1284 } 1285 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1286 VM_BUG_ON_PAGE(!PageCompound(page), page); 1287 if (flags & FOLL_GET) 1288 get_page_foll(page); 1289 1290 out: 1291 return page; 1292 } 1293 1294 /* NUMA hinting page fault entry point for trans huge pmds */ 1295 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 1296 unsigned long addr, pmd_t pmd, pmd_t *pmdp) 1297 { 1298 spinlock_t *ptl; 1299 struct anon_vma *anon_vma = NULL; 1300 struct page *page; 1301 unsigned long haddr = addr & HPAGE_PMD_MASK; 1302 int page_nid = -1, this_nid = numa_node_id(); 1303 int target_nid, last_cpupid = -1; 1304 bool page_locked; 1305 bool migrated = false; 1306 int flags = 0; 1307 1308 ptl = pmd_lock(mm, pmdp); 1309 if (unlikely(!pmd_same(pmd, *pmdp))) 1310 goto out_unlock; 1311 1312 /* 1313 * If there are potential migrations, wait for completion and retry 1314 * without disrupting NUMA hinting information. Do not relock and 1315 * check_same as the page may no longer be mapped. 1316 */ 1317 if (unlikely(pmd_trans_migrating(*pmdp))) { 1318 spin_unlock(ptl); 1319 wait_migrate_huge_page(vma->anon_vma, pmdp); 1320 goto out; 1321 } 1322 1323 page = pmd_page(pmd); 1324 BUG_ON(is_huge_zero_page(page)); 1325 page_nid = page_to_nid(page); 1326 last_cpupid = page_cpupid_last(page); 1327 count_vm_numa_event(NUMA_HINT_FAULTS); 1328 if (page_nid == this_nid) { 1329 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 1330 flags |= TNF_FAULT_LOCAL; 1331 } 1332 1333 /* 1334 * Avoid grouping on DSO/COW pages in specific and RO pages 1335 * in general, RO pages shouldn't hurt as much anyway since 1336 * they can be in shared cache state. 1337 */ 1338 if (!pmd_write(pmd)) 1339 flags |= TNF_NO_GROUP; 1340 1341 /* 1342 * Acquire the page lock to serialise THP migrations but avoid dropping 1343 * page_table_lock if at all possible 1344 */ 1345 page_locked = trylock_page(page); 1346 target_nid = mpol_misplaced(page, vma, haddr); 1347 if (target_nid == -1) { 1348 /* If the page was locked, there are no parallel migrations */ 1349 if (page_locked) 1350 goto clear_pmdnuma; 1351 } 1352 1353 /* Migration could have started since the pmd_trans_migrating check */ 1354 if (!page_locked) { 1355 spin_unlock(ptl); 1356 wait_on_page_locked(page); 1357 page_nid = -1; 1358 goto out; 1359 } 1360 1361 /* 1362 * Page is misplaced. Page lock serialises migrations. 
Acquire anon_vma 1363 * to serialises splits 1364 */ 1365 get_page(page); 1366 spin_unlock(ptl); 1367 anon_vma = page_lock_anon_vma_read(page); 1368 1369 /* Confirm the PMD did not change while page_table_lock was released */ 1370 spin_lock(ptl); 1371 if (unlikely(!pmd_same(pmd, *pmdp))) { 1372 unlock_page(page); 1373 put_page(page); 1374 page_nid = -1; 1375 goto out_unlock; 1376 } 1377 1378 /* Bail if we fail to protect against THP splits for any reason */ 1379 if (unlikely(!anon_vma)) { 1380 put_page(page); 1381 page_nid = -1; 1382 goto clear_pmdnuma; 1383 } 1384 1385 /* 1386 * Migrate the THP to the requested node, returns with page unlocked 1387 * and pmd_numa cleared. 1388 */ 1389 spin_unlock(ptl); 1390 migrated = migrate_misplaced_transhuge_page(mm, vma, 1391 pmdp, pmd, addr, page, target_nid); 1392 if (migrated) { 1393 flags |= TNF_MIGRATED; 1394 page_nid = target_nid; 1395 } 1396 1397 goto out; 1398 clear_pmdnuma: 1399 BUG_ON(!PageLocked(page)); 1400 pmd = pmd_mknonnuma(pmd); 1401 set_pmd_at(mm, haddr, pmdp, pmd); 1402 VM_BUG_ON(pmd_numa(*pmdp)); 1403 update_mmu_cache_pmd(vma, addr, pmdp); 1404 unlock_page(page); 1405 out_unlock: 1406 spin_unlock(ptl); 1407 1408 out: 1409 if (anon_vma) 1410 page_unlock_anon_vma_read(anon_vma); 1411 1412 if (page_nid != -1) 1413 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); 1414 1415 return 0; 1416 } 1417 1418 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 1419 pmd_t *pmd, unsigned long addr) 1420 { 1421 spinlock_t *ptl; 1422 int ret = 0; 1423 1424 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1425 struct page *page; 1426 pgtable_t pgtable; 1427 pmd_t orig_pmd; 1428 /* 1429 * For architectures like ppc64 we look at deposited pgtable 1430 * when calling pmdp_get_and_clear. So do the 1431 * pgtable_trans_huge_withdraw after finishing pmdp related 1432 * operations. 1433 */ 1434 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1435 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1436 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1437 if (is_huge_zero_pmd(orig_pmd)) { 1438 atomic_long_dec(&tlb->mm->nr_ptes); 1439 spin_unlock(ptl); 1440 put_huge_zero_page(); 1441 } else { 1442 page = pmd_page(orig_pmd); 1443 page_remove_rmap(page); 1444 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 1445 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); 1446 VM_BUG_ON_PAGE(!PageHead(page), page); 1447 atomic_long_dec(&tlb->mm->nr_ptes); 1448 spin_unlock(ptl); 1449 tlb_remove_page(tlb, page); 1450 } 1451 pte_free(tlb->mm, pgtable); 1452 ret = 1; 1453 } 1454 return ret; 1455 } 1456 1457 int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 1458 unsigned long addr, unsigned long end, 1459 unsigned char *vec) 1460 { 1461 spinlock_t *ptl; 1462 int ret = 0; 1463 1464 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1465 /* 1466 * All logical pages in the range are present 1467 * if backed by a huge page. 
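		 *
		 * (Illustrative example: for one trans huge pmd the whole
		 * HPAGE_PMD_SIZE extent is resident, so the memset() below
		 * writes (end - addr) >> PAGE_SHIFT bytes of 1s, up to 512
		 * with 4K base pages, into the mincore() vector without
		 * walking individual ptes.)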
		 */
		spin_unlock(ptl);
		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		ret = 1;
	}

	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
	if (ret == 1) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
	}
out:
	return ret;
}

/*
 * Returns
 *  - 0 if the PMD could not be locked
 *  - 1 if the PMD was locked but protections are unchanged and no TLB flush
 *    is necessary
 *  - HPAGE_PMD_NR if protections were changed and a TLB flush is necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pmd_t entry;
		ret = 1;
		if (!prot_numa) {
			entry = pmdp_get_and_clear(mm, addr, pmd);
			if (pmd_numa(entry))
				entry = pmd_mknonnuma(entry);
			entry = pmd_modify(entry, newprot);
			ret = HPAGE_PMD_NR;
			set_pmd_at(mm, addr, pmd, entry);
			BUG_ON(pmd_write(entry));
		} else {
			struct page *page = pmd_page(*pmd);

			/*
			 * Do not trap faults against the zero page. The
			 * read-only data is likely to be read-cached in the
			 * local CPU cache and it is less useful to know about
			 * local vs remote hits on the zero page.
			 */
			if (!is_huge_zero_page(page) &&
			    !pmd_numa(*pmd)) {
				pmdp_set_numa(mm, addr, pmd);
				ret = HPAGE_PMD_NR;
			}
		}
		spin_unlock(ptl);
	}

	return ret;
}

/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking the page
 * table lock, so the caller must unlock it.
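 *
 * Illustrative caller pattern, mirroring zap_huge_pmd() and change_huge_pmd()
 * above (a sketch, not a new API):
 *
 *	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 *		... the pmd maps a stable THP, operate on it ...
 *		spin_unlock(ptl);
 *	}
 *
 * A return of -1 means the caller raced with a split; by the time
 * wait_split_huge_page() returns, the caller can fall back to the normal
 * pte-level paths.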
1580 */ 1581 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, 1582 spinlock_t **ptl) 1583 { 1584 *ptl = pmd_lock(vma->vm_mm, pmd); 1585 if (likely(pmd_trans_huge(*pmd))) { 1586 if (unlikely(pmd_trans_splitting(*pmd))) { 1587 spin_unlock(*ptl); 1588 wait_split_huge_page(vma->anon_vma, pmd); 1589 return -1; 1590 } else { 1591 /* Thp mapped by 'pmd' is stable, so we can 1592 * handle it as it is. */ 1593 return 1; 1594 } 1595 } 1596 spin_unlock(*ptl); 1597 return 0; 1598 } 1599 1600 /* 1601 * This function returns whether a given @page is mapped onto the @address 1602 * in the virtual space of @mm. 1603 * 1604 * When it's true, this function returns *pmd with holding the page table lock 1605 * and passing it back to the caller via @ptl. 1606 * If it's false, returns NULL without holding the page table lock. 1607 */ 1608 pmd_t *page_check_address_pmd(struct page *page, 1609 struct mm_struct *mm, 1610 unsigned long address, 1611 enum page_check_address_pmd_flag flag, 1612 spinlock_t **ptl) 1613 { 1614 pmd_t *pmd; 1615 1616 if (address & ~HPAGE_PMD_MASK) 1617 return NULL; 1618 1619 pmd = mm_find_pmd(mm, address); 1620 if (!pmd) 1621 return NULL; 1622 *ptl = pmd_lock(mm, pmd); 1623 if (pmd_none(*pmd)) 1624 goto unlock; 1625 if (pmd_page(*pmd) != page) 1626 goto unlock; 1627 /* 1628 * split_vma() may create temporary aliased mappings. There is 1629 * no risk as long as all huge pmd are found and have their 1630 * splitting bit set before __split_huge_page_refcount 1631 * runs. Finding the same huge pmd more than once during the 1632 * same rmap walk is not a problem. 1633 */ 1634 if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG && 1635 pmd_trans_splitting(*pmd)) 1636 goto unlock; 1637 if (pmd_trans_huge(*pmd)) { 1638 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG && 1639 !pmd_trans_splitting(*pmd)); 1640 return pmd; 1641 } 1642 unlock: 1643 spin_unlock(*ptl); 1644 return NULL; 1645 } 1646 1647 static int __split_huge_page_splitting(struct page *page, 1648 struct vm_area_struct *vma, 1649 unsigned long address) 1650 { 1651 struct mm_struct *mm = vma->vm_mm; 1652 spinlock_t *ptl; 1653 pmd_t *pmd; 1654 int ret = 0; 1655 /* For mmu_notifiers */ 1656 const unsigned long mmun_start = address; 1657 const unsigned long mmun_end = address + HPAGE_PMD_SIZE; 1658 1659 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 1660 pmd = page_check_address_pmd(page, mm, address, 1661 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl); 1662 if (pmd) { 1663 /* 1664 * We can't temporarily set the pmd to null in order 1665 * to split it, the pmd must remain marked huge at all 1666 * times or the VM won't take the pmd_trans_huge paths 1667 * and it won't wait on the anon_vma->root->rwsem to 1668 * serialize against split_huge_page*. 
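		 *
		 * (Illustrative note: pmdp_splitting_flush() below marks the
		 * pmd with the architecture's "splitting" bit and flushes the
		 * TLB for the range; on architectures using the generic
		 * implementation that flush is also what pushes concurrent
		 * gup_fast() walkers off the pmd before the refcounts are
		 * redistributed.)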
1669 */ 1670 pmdp_splitting_flush(vma, address, pmd); 1671 ret = 1; 1672 spin_unlock(ptl); 1673 } 1674 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 1675 1676 return ret; 1677 } 1678 1679 static void __split_huge_page_refcount(struct page *page, 1680 struct list_head *list) 1681 { 1682 int i; 1683 struct zone *zone = page_zone(page); 1684 struct lruvec *lruvec; 1685 int tail_count = 0; 1686 1687 /* prevent PageLRU to go away from under us, and freeze lru stats */ 1688 spin_lock_irq(&zone->lru_lock); 1689 lruvec = mem_cgroup_page_lruvec(page, zone); 1690 1691 compound_lock(page); 1692 /* complete memcg works before add pages to LRU */ 1693 mem_cgroup_split_huge_fixup(page); 1694 1695 for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { 1696 struct page *page_tail = page + i; 1697 1698 /* tail_page->_mapcount cannot change */ 1699 BUG_ON(page_mapcount(page_tail) < 0); 1700 tail_count += page_mapcount(page_tail); 1701 /* check for overflow */ 1702 BUG_ON(tail_count < 0); 1703 BUG_ON(atomic_read(&page_tail->_count) != 0); 1704 /* 1705 * tail_page->_count is zero and not changing from 1706 * under us. But get_page_unless_zero() may be running 1707 * from under us on the tail_page. If we used 1708 * atomic_set() below instead of atomic_add(), we 1709 * would then run atomic_set() concurrently with 1710 * get_page_unless_zero(), and atomic_set() is 1711 * implemented in C not using locked ops. spin_unlock 1712 * on x86 sometime uses locked ops because of PPro 1713 * errata 66, 92, so unless somebody can guarantee 1714 * atomic_set() here would be safe on all archs (and 1715 * not only on x86), it's safer to use atomic_add(). 1716 */ 1717 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, 1718 &page_tail->_count); 1719 1720 /* after clearing PageTail the gup refcount can be released */ 1721 smp_mb(); 1722 1723 /* 1724 * retain hwpoison flag of the poisoned tail page: 1725 * fix for the unsuitable process killed on Guest Machine(KVM) 1726 * by the memory-failure. 1727 */ 1728 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; 1729 page_tail->flags |= (page->flags & 1730 ((1L << PG_referenced) | 1731 (1L << PG_swapbacked) | 1732 (1L << PG_mlocked) | 1733 (1L << PG_uptodate) | 1734 (1L << PG_active) | 1735 (1L << PG_unevictable))); 1736 page_tail->flags |= (1L << PG_dirty); 1737 1738 /* clear PageTail before overwriting first_page */ 1739 smp_wmb(); 1740 1741 /* 1742 * __split_huge_page_splitting() already set the 1743 * splitting bit in all pmd that could map this 1744 * hugepage, that will ensure no CPU can alter the 1745 * mapcount on the head page. The mapcount is only 1746 * accounted in the head page and it has to be 1747 * transferred to all tail pages in the below code. So 1748 * for this code to be safe, the split the mapcount 1749 * can't change. But that doesn't mean userland can't 1750 * keep changing and reading the page contents while 1751 * we transfer the mapcount, so the pmd splitting 1752 * status is achieved setting a reserved bit in the 1753 * pmd, not by clearing the present bit. 
1754 */ 1755 page_tail->_mapcount = page->_mapcount; 1756 1757 BUG_ON(page_tail->mapping); 1758 page_tail->mapping = page->mapping; 1759 1760 page_tail->index = page->index + i; 1761 page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); 1762 1763 BUG_ON(!PageAnon(page_tail)); 1764 BUG_ON(!PageUptodate(page_tail)); 1765 BUG_ON(!PageDirty(page_tail)); 1766 BUG_ON(!PageSwapBacked(page_tail)); 1767 1768 lru_add_page_tail(page, page_tail, lruvec, list); 1769 } 1770 atomic_sub(tail_count, &page->_count); 1771 BUG_ON(atomic_read(&page->_count) <= 0); 1772 1773 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); 1774 1775 ClearPageCompound(page); 1776 compound_unlock(page); 1777 spin_unlock_irq(&zone->lru_lock); 1778 1779 for (i = 1; i < HPAGE_PMD_NR; i++) { 1780 struct page *page_tail = page + i; 1781 BUG_ON(page_count(page_tail) <= 0); 1782 /* 1783 * Tail pages may be freed if there wasn't any mapping 1784 * like if add_to_swap() is running on a lru page that 1785 * had its mapping zapped. And freeing these pages 1786 * requires taking the lru_lock so we do the put_page 1787 * of the tail pages after the split is complete. 1788 */ 1789 put_page(page_tail); 1790 } 1791 1792 /* 1793 * Only the head page (now become a regular page) is required 1794 * to be pinned by the caller. 1795 */ 1796 BUG_ON(page_count(page) <= 0); 1797 } 1798 1799 static int __split_huge_page_map(struct page *page, 1800 struct vm_area_struct *vma, 1801 unsigned long address) 1802 { 1803 struct mm_struct *mm = vma->vm_mm; 1804 spinlock_t *ptl; 1805 pmd_t *pmd, _pmd; 1806 int ret = 0, i; 1807 pgtable_t pgtable; 1808 unsigned long haddr; 1809 1810 pmd = page_check_address_pmd(page, mm, address, 1811 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl); 1812 if (pmd) { 1813 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1814 pmd_populate(mm, &_pmd, pgtable); 1815 1816 haddr = address; 1817 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1818 pte_t *pte, entry; 1819 BUG_ON(PageCompound(page+i)); 1820 entry = mk_pte(page + i, vma->vm_page_prot); 1821 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1822 if (!pmd_write(*pmd)) 1823 entry = pte_wrprotect(entry); 1824 else 1825 BUG_ON(page_mapcount(page) != 1); 1826 if (!pmd_young(*pmd)) 1827 entry = pte_mkold(entry); 1828 if (pmd_numa(*pmd)) 1829 entry = pte_mknuma(entry); 1830 pte = pte_offset_map(&_pmd, haddr); 1831 BUG_ON(!pte_none(*pte)); 1832 set_pte_at(mm, haddr, pte, entry); 1833 pte_unmap(pte); 1834 } 1835 1836 smp_wmb(); /* make pte visible before pmd */ 1837 /* 1838 * Up to this point the pmd is present and huge and 1839 * userland has the whole access to the hugepage 1840 * during the split (which happens in place). If we 1841 * overwrite the pmd with the not-huge version 1842 * pointing to the pte here (which of course we could 1843 * if all CPUs were bug free), userland could trigger 1844 * a small page size TLB miss on the small sized TLB 1845 * while the hugepage TLB entry is still established 1846 * in the huge TLB. Some CPU doesn't like that. See 1847 * http://support.amd.com/us/Processor_TechDocs/41322.pdf, 1848 * Erratum 383 on page 93. Intel should be safe but is 1849 * also warns that it's only safe if the permission 1850 * and cache attributes of the two entries loaded in 1851 * the two TLB is identical (which should be the case 1852 * here). But it is generally safer to never allow 1853 * small and huge TLB entries for the same virtual 1854 * address to be loaded simultaneously. 
So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically, because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * bits must remain set on the pmd at all times until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
		spin_unlock(ptl);
	}

	return ret;
}

/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma,
			      struct list_head *list)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page, list);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

/*
 * Split a hugepage into normal pages. This doesn't change the position of the
 * head page. If @list is null, tail pages will be added to the LRU list;
 * otherwise, to @list. Both the head page and the tail pages will inherit
 * mapping, flags, and so on from the hugepage.
 * Return 0 if the hugepage is split successfully, otherwise return 1.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(is_huge_zero_page(page));
	BUG_ON(!PageAnon(page));

	/*
	 * The caller does not necessarily hold an mmap_sem that would prevent
	 * the anon_vma from disappearing, so we first take a reference to it
	 * and then lock the anon_vma for write. This is similar to
	 * page_lock_anon_vma_read() except the write lock is taken to
	 * serialise against parallel split or collapse operations.
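	 *
	 * (Illustrative note: most callers reach this through the
	 * split_huge_page() wrapper which, assuming the usual definition in
	 * <linux/huge_mm.h>, simply passes list == NULL so the tail pages are
	 * returned to the LRU rather than collected on a private list.)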
1942 */ 1943 anon_vma = page_get_anon_vma(page); 1944 if (!anon_vma) 1945 goto out; 1946 anon_vma_lock_write(anon_vma); 1947 1948 ret = 0; 1949 if (!PageCompound(page)) 1950 goto out_unlock; 1951 1952 BUG_ON(!PageSwapBacked(page)); 1953 __split_huge_page(page, anon_vma, list); 1954 count_vm_event(THP_SPLIT); 1955 1956 BUG_ON(PageCompound(page)); 1957 out_unlock: 1958 anon_vma_unlock_write(anon_vma); 1959 put_anon_vma(anon_vma); 1960 out: 1961 return ret; 1962 } 1963 1964 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) 1965 1966 int hugepage_madvise(struct vm_area_struct *vma, 1967 unsigned long *vm_flags, int advice) 1968 { 1969 struct mm_struct *mm = vma->vm_mm; 1970 1971 switch (advice) { 1972 case MADV_HUGEPAGE: 1973 /* 1974 * Be somewhat over-protective like KSM for now! 1975 */ 1976 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 1977 return -EINVAL; 1978 if (mm->def_flags & VM_NOHUGEPAGE) 1979 return -EINVAL; 1980 *vm_flags &= ~VM_NOHUGEPAGE; 1981 *vm_flags |= VM_HUGEPAGE; 1982 /* 1983 * If the vma become good for khugepaged to scan, 1984 * register it here without waiting a page fault that 1985 * may not happen any time soon. 1986 */ 1987 if (unlikely(khugepaged_enter_vma_merge(vma))) 1988 return -ENOMEM; 1989 break; 1990 case MADV_NOHUGEPAGE: 1991 /* 1992 * Be somewhat over-protective like KSM for now! 1993 */ 1994 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 1995 return -EINVAL; 1996 *vm_flags &= ~VM_HUGEPAGE; 1997 *vm_flags |= VM_NOHUGEPAGE; 1998 /* 1999 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning 2000 * this vma even if we leave the mm registered in khugepaged if 2001 * it got registered before VM_NOHUGEPAGE was set. 2002 */ 2003 break; 2004 } 2005 2006 return 0; 2007 } 2008 2009 static int __init khugepaged_slab_init(void) 2010 { 2011 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", 2012 sizeof(struct mm_slot), 2013 __alignof__(struct mm_slot), 0, NULL); 2014 if (!mm_slot_cache) 2015 return -ENOMEM; 2016 2017 return 0; 2018 } 2019 2020 static inline struct mm_slot *alloc_mm_slot(void) 2021 { 2022 if (!mm_slot_cache) /* initialization failed */ 2023 return NULL; 2024 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 2025 } 2026 2027 static inline void free_mm_slot(struct mm_slot *mm_slot) 2028 { 2029 kmem_cache_free(mm_slot_cache, mm_slot); 2030 } 2031 2032 static struct mm_slot *get_mm_slot(struct mm_struct *mm) 2033 { 2034 struct mm_slot *mm_slot; 2035 2036 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) 2037 if (mm == mm_slot->mm) 2038 return mm_slot; 2039 2040 return NULL; 2041 } 2042 2043 static void insert_to_mm_slots_hash(struct mm_struct *mm, 2044 struct mm_slot *mm_slot) 2045 { 2046 mm_slot->mm = mm; 2047 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); 2048 } 2049 2050 static inline int khugepaged_test_exit(struct mm_struct *mm) 2051 { 2052 return atomic_read(&mm->mm_users) == 0; 2053 } 2054 2055 int __khugepaged_enter(struct mm_struct *mm) 2056 { 2057 struct mm_slot *mm_slot; 2058 int wakeup; 2059 2060 mm_slot = alloc_mm_slot(); 2061 if (!mm_slot) 2062 return -ENOMEM; 2063 2064 /* __khugepaged_exit() must not run from under us */ 2065 VM_BUG_ON(khugepaged_test_exit(mm)); 2066 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { 2067 free_mm_slot(mm_slot); 2068 return 0; 2069 } 2070 2071 spin_lock(&khugepaged_mm_lock); 2072 insert_to_mm_slots_hash(mm, mm_slot); 2073 /* 2074 * Insert just behind the scanning cursor, to let the area settle 2075 * down a little. 
2076 */ 2077 wakeup = list_empty(&khugepaged_scan.mm_head); 2078 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); 2079 spin_unlock(&khugepaged_mm_lock); 2080 2081 atomic_inc(&mm->mm_count); 2082 if (wakeup) 2083 wake_up_interruptible(&khugepaged_wait); 2084 2085 return 0; 2086 } 2087 2088 int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 2089 { 2090 unsigned long hstart, hend; 2091 if (!vma->anon_vma) 2092 /* 2093 * Not yet faulted in so we will register later in the 2094 * page fault if needed. 2095 */ 2096 return 0; 2097 if (vma->vm_ops) 2098 /* khugepaged not yet working on file or special mappings */ 2099 return 0; 2100 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2101 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2102 hend = vma->vm_end & HPAGE_PMD_MASK; 2103 if (hstart < hend) 2104 return khugepaged_enter(vma); 2105 return 0; 2106 } 2107 2108 void __khugepaged_exit(struct mm_struct *mm) 2109 { 2110 struct mm_slot *mm_slot; 2111 int free = 0; 2112 2113 spin_lock(&khugepaged_mm_lock); 2114 mm_slot = get_mm_slot(mm); 2115 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { 2116 hash_del(&mm_slot->hash); 2117 list_del(&mm_slot->mm_node); 2118 free = 1; 2119 } 2120 spin_unlock(&khugepaged_mm_lock); 2121 2122 if (free) { 2123 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2124 free_mm_slot(mm_slot); 2125 mmdrop(mm); 2126 } else if (mm_slot) { 2127 /* 2128 * This is required to serialize against 2129 * khugepaged_test_exit() (which is guaranteed to run 2130 * under mmap sem read mode). Stop here (after we 2131 * return all pagetables will be destroyed) until 2132 * khugepaged has finished working on the pagetables 2133 * under the mmap_sem. 2134 */ 2135 down_write(&mm->mmap_sem); 2136 up_write(&mm->mmap_sem); 2137 } 2138 } 2139 2140 static void release_pte_page(struct page *page) 2141 { 2142 /* 0 stands for page_is_file_cache(page) == false */ 2143 dec_zone_page_state(page, NR_ISOLATED_ANON + 0); 2144 unlock_page(page); 2145 putback_lru_page(page); 2146 } 2147 2148 static void release_pte_pages(pte_t *pte, pte_t *_pte) 2149 { 2150 while (--_pte >= pte) { 2151 pte_t pteval = *_pte; 2152 if (!pte_none(pteval)) 2153 release_pte_page(pte_page(pteval)); 2154 } 2155 } 2156 2157 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, 2158 unsigned long address, 2159 pte_t *pte) 2160 { 2161 struct page *page; 2162 pte_t *_pte; 2163 int referenced = 0, none = 0; 2164 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2165 _pte++, address += PAGE_SIZE) { 2166 pte_t pteval = *_pte; 2167 if (pte_none(pteval)) { 2168 if (++none <= khugepaged_max_ptes_none) 2169 continue; 2170 else 2171 goto out; 2172 } 2173 if (!pte_present(pteval) || !pte_write(pteval)) 2174 goto out; 2175 page = vm_normal_page(vma, address, pteval); 2176 if (unlikely(!page)) 2177 goto out; 2178 2179 VM_BUG_ON_PAGE(PageCompound(page), page); 2180 VM_BUG_ON_PAGE(!PageAnon(page), page); 2181 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 2182 2183 /* cannot use mapcount: can't collapse if there's a gup pin */ 2184 if (page_count(page) != 1) 2185 goto out; 2186 /* 2187 * We can do it before isolate_lru_page because the 2188 * page can't be freed from under us. NOTE: PG_lock 2189 * is needed to serialize against split_huge_page 2190 * when invoked from the VM. 2191 */ 2192 if (!trylock_page(page)) 2193 goto out; 2194 /* 2195 * Isolate the page to avoid collapsing an hugepage 2196 * currently in use by the VM. 
2197 */ 2198 if (isolate_lru_page(page)) { 2199 unlock_page(page); 2200 goto out; 2201 } 2202 /* 0 stands for page_is_file_cache(page) == false */ 2203 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); 2204 VM_BUG_ON_PAGE(!PageLocked(page), page); 2205 VM_BUG_ON_PAGE(PageLRU(page), page); 2206 2207 /* If there is no mapped pte young don't collapse the page */ 2208 if (pte_young(pteval) || PageReferenced(page) || 2209 mmu_notifier_test_young(vma->vm_mm, address)) 2210 referenced = 1; 2211 } 2212 if (likely(referenced)) 2213 return 1; 2214 out: 2215 release_pte_pages(pte, _pte); 2216 return 0; 2217 } 2218 2219 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, 2220 struct vm_area_struct *vma, 2221 unsigned long address, 2222 spinlock_t *ptl) 2223 { 2224 pte_t *_pte; 2225 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { 2226 pte_t pteval = *_pte; 2227 struct page *src_page; 2228 2229 if (pte_none(pteval)) { 2230 clear_user_highpage(page, address); 2231 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); 2232 } else { 2233 src_page = pte_page(pteval); 2234 copy_user_highpage(page, src_page, address, vma); 2235 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); 2236 release_pte_page(src_page); 2237 /* 2238 * ptl mostly unnecessary, but preempt has to 2239 * be disabled to update the per-cpu stats 2240 * inside page_remove_rmap(). 2241 */ 2242 spin_lock(ptl); 2243 /* 2244 * paravirt calls inside pte_clear here are 2245 * superfluous. 2246 */ 2247 pte_clear(vma->vm_mm, address, _pte); 2248 page_remove_rmap(src_page); 2249 spin_unlock(ptl); 2250 free_page_and_swap_cache(src_page); 2251 } 2252 2253 address += PAGE_SIZE; 2254 page++; 2255 } 2256 } 2257 2258 static void khugepaged_alloc_sleep(void) 2259 { 2260 wait_event_freezable_timeout(khugepaged_wait, false, 2261 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 2262 } 2263 2264 static int khugepaged_node_load[MAX_NUMNODES]; 2265 2266 #ifdef CONFIG_NUMA 2267 static int khugepaged_find_target_node(void) 2268 { 2269 static int last_khugepaged_target_node = NUMA_NO_NODE; 2270 int nid, target_node = 0, max_value = 0; 2271 2272 /* find first node with max normal pages hit */ 2273 for (nid = 0; nid < MAX_NUMNODES; nid++) 2274 if (khugepaged_node_load[nid] > max_value) { 2275 max_value = khugepaged_node_load[nid]; 2276 target_node = nid; 2277 } 2278 2279 /* do some balance if several nodes have the same hit record */ 2280 if (target_node <= last_khugepaged_target_node) 2281 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; 2282 nid++) 2283 if (max_value == khugepaged_node_load[nid]) { 2284 target_node = nid; 2285 break; 2286 } 2287 2288 last_khugepaged_target_node = target_node; 2289 return target_node; 2290 } 2291 2292 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2293 { 2294 if (IS_ERR(*hpage)) { 2295 if (!*wait) 2296 return false; 2297 2298 *wait = false; 2299 *hpage = NULL; 2300 khugepaged_alloc_sleep(); 2301 } else if (*hpage) { 2302 put_page(*hpage); 2303 *hpage = NULL; 2304 } 2305 2306 return true; 2307 } 2308 2309 static struct page 2310 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2311 struct vm_area_struct *vma, unsigned long address, 2312 int node) 2313 { 2314 VM_BUG_ON_PAGE(*hpage, *hpage); 2315 /* 2316 * Allocate the page while the vma is still valid and under 2317 * the mmap_sem read mode so there is no memory allocation 2318 * later when we take the mmap_sem in write mode. 
This is more 2319 * friendly behavior (OTOH it may actually hide bugs) to 2320 * filesystems in userland with daemons allocating memory in 2321 * the userland I/O paths. Allocating memory with the 2322 * mmap_sem in read mode is good idea also to allow greater 2323 * scalability. 2324 */ 2325 *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask( 2326 khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER); 2327 /* 2328 * After allocating the hugepage, release the mmap_sem read lock in 2329 * preparation for taking it in write mode. 2330 */ 2331 up_read(&mm->mmap_sem); 2332 if (unlikely(!*hpage)) { 2333 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2334 *hpage = ERR_PTR(-ENOMEM); 2335 return NULL; 2336 } 2337 2338 count_vm_event(THP_COLLAPSE_ALLOC); 2339 return *hpage; 2340 } 2341 #else 2342 static int khugepaged_find_target_node(void) 2343 { 2344 return 0; 2345 } 2346 2347 static inline struct page *alloc_hugepage(int defrag) 2348 { 2349 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0), 2350 HPAGE_PMD_ORDER); 2351 } 2352 2353 static struct page *khugepaged_alloc_hugepage(bool *wait) 2354 { 2355 struct page *hpage; 2356 2357 do { 2358 hpage = alloc_hugepage(khugepaged_defrag()); 2359 if (!hpage) { 2360 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2361 if (!*wait) 2362 return NULL; 2363 2364 *wait = false; 2365 khugepaged_alloc_sleep(); 2366 } else 2367 count_vm_event(THP_COLLAPSE_ALLOC); 2368 } while (unlikely(!hpage) && likely(khugepaged_enabled())); 2369 2370 return hpage; 2371 } 2372 2373 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) 2374 { 2375 if (!*hpage) 2376 *hpage = khugepaged_alloc_hugepage(wait); 2377 2378 if (unlikely(!*hpage)) 2379 return false; 2380 2381 return true; 2382 } 2383 2384 static struct page 2385 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, 2386 struct vm_area_struct *vma, unsigned long address, 2387 int node) 2388 { 2389 up_read(&mm->mmap_sem); 2390 VM_BUG_ON(!*hpage); 2391 return *hpage; 2392 } 2393 #endif 2394 2395 static bool hugepage_vma_check(struct vm_area_struct *vma) 2396 { 2397 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || 2398 (vma->vm_flags & VM_NOHUGEPAGE)) 2399 return false; 2400 2401 if (!vma->anon_vma || vma->vm_ops) 2402 return false; 2403 if (is_vma_temporary_stack(vma)) 2404 return false; 2405 VM_BUG_ON(vma->vm_flags & VM_NO_THP); 2406 return true; 2407 } 2408 2409 static void collapse_huge_page(struct mm_struct *mm, 2410 unsigned long address, 2411 struct page **hpage, 2412 struct vm_area_struct *vma, 2413 int node) 2414 { 2415 pmd_t *pmd, _pmd; 2416 pte_t *pte; 2417 pgtable_t pgtable; 2418 struct page *new_page; 2419 spinlock_t *pmd_ptl, *pte_ptl; 2420 int isolated; 2421 unsigned long hstart, hend; 2422 unsigned long mmun_start; /* For mmu_notifiers */ 2423 unsigned long mmun_end; /* For mmu_notifiers */ 2424 2425 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2426 2427 /* release the mmap_sem read lock. */ 2428 new_page = khugepaged_alloc_page(hpage, mm, vma, address, node); 2429 if (!new_page) 2430 return; 2431 2432 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) 2433 return; 2434 2435 /* 2436 * Prevent all access to pagetables with the exception of 2437 * gup_fast later hanlded by the ptep_clear_flush and the VM 2438 * handled by the anon_vma lock + PG_lock. 
2439 */ 2440 down_write(&mm->mmap_sem); 2441 if (unlikely(khugepaged_test_exit(mm))) 2442 goto out; 2443 2444 vma = find_vma(mm, address); 2445 if (!vma) 2446 goto out; 2447 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2448 hend = vma->vm_end & HPAGE_PMD_MASK; 2449 if (address < hstart || address + HPAGE_PMD_SIZE > hend) 2450 goto out; 2451 if (!hugepage_vma_check(vma)) 2452 goto out; 2453 pmd = mm_find_pmd(mm, address); 2454 if (!pmd) 2455 goto out; 2456 if (pmd_trans_huge(*pmd)) 2457 goto out; 2458 2459 anon_vma_lock_write(vma->anon_vma); 2460 2461 pte = pte_offset_map(pmd, address); 2462 pte_ptl = pte_lockptr(mm, pmd); 2463 2464 mmun_start = address; 2465 mmun_end = address + HPAGE_PMD_SIZE; 2466 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2467 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 2468 /* 2469 * After this gup_fast can't run anymore. This also removes 2470 * any huge TLB entry from the CPU so we won't allow 2471 * huge and small TLB entries for the same virtual address 2472 * to avoid the risk of CPU bugs in that area. 2473 */ 2474 _pmd = pmdp_clear_flush(vma, address, pmd); 2475 spin_unlock(pmd_ptl); 2476 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2477 2478 spin_lock(pte_ptl); 2479 isolated = __collapse_huge_page_isolate(vma, address, pte); 2480 spin_unlock(pte_ptl); 2481 2482 if (unlikely(!isolated)) { 2483 pte_unmap(pte); 2484 spin_lock(pmd_ptl); 2485 BUG_ON(!pmd_none(*pmd)); 2486 /* 2487 * We can only use set_pmd_at when establishing 2488 * hugepmds and never for establishing regular pmds that 2489 * points to regular pagetables. Use pmd_populate for that 2490 */ 2491 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); 2492 spin_unlock(pmd_ptl); 2493 anon_vma_unlock_write(vma->anon_vma); 2494 goto out; 2495 } 2496 2497 /* 2498 * All pages are isolated and locked so anon_vma rmap 2499 * can't run anymore. 2500 */ 2501 anon_vma_unlock_write(vma->anon_vma); 2502 2503 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); 2504 pte_unmap(pte); 2505 __SetPageUptodate(new_page); 2506 pgtable = pmd_pgtable(_pmd); 2507 2508 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); 2509 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 2510 2511 /* 2512 * spin_lock() below is not the equivalent of smp_wmb(), so 2513 * this is needed to avoid the copy_huge_page writes to become 2514 * visible after the set_pmd_at() write. 
2515 */ 2516 smp_wmb(); 2517 2518 spin_lock(pmd_ptl); 2519 BUG_ON(!pmd_none(*pmd)); 2520 page_add_new_anon_rmap(new_page, vma, address); 2521 pgtable_trans_huge_deposit(mm, pmd, pgtable); 2522 set_pmd_at(mm, address, pmd, _pmd); 2523 update_mmu_cache_pmd(vma, address, pmd); 2524 spin_unlock(pmd_ptl); 2525 2526 *hpage = NULL; 2527 2528 khugepaged_pages_collapsed++; 2529 out_up_write: 2530 up_write(&mm->mmap_sem); 2531 return; 2532 2533 out: 2534 mem_cgroup_uncharge_page(new_page); 2535 goto out_up_write; 2536 } 2537 2538 static int khugepaged_scan_pmd(struct mm_struct *mm, 2539 struct vm_area_struct *vma, 2540 unsigned long address, 2541 struct page **hpage) 2542 { 2543 pmd_t *pmd; 2544 pte_t *pte, *_pte; 2545 int ret = 0, referenced = 0, none = 0; 2546 struct page *page; 2547 unsigned long _address; 2548 spinlock_t *ptl; 2549 int node = NUMA_NO_NODE; 2550 2551 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2552 2553 pmd = mm_find_pmd(mm, address); 2554 if (!pmd) 2555 goto out; 2556 if (pmd_trans_huge(*pmd)) 2557 goto out; 2558 2559 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2560 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2561 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2562 _pte++, _address += PAGE_SIZE) { 2563 pte_t pteval = *_pte; 2564 if (pte_none(pteval)) { 2565 if (++none <= khugepaged_max_ptes_none) 2566 continue; 2567 else 2568 goto out_unmap; 2569 } 2570 if (!pte_present(pteval) || !pte_write(pteval)) 2571 goto out_unmap; 2572 page = vm_normal_page(vma, _address, pteval); 2573 if (unlikely(!page)) 2574 goto out_unmap; 2575 /* 2576 * Record which node the original page is from and save this 2577 * information to khugepaged_node_load[]. 2578 * Khupaged will allocate hugepage from the node has the max 2579 * hit record. 2580 */ 2581 node = page_to_nid(page); 2582 khugepaged_node_load[node]++; 2583 VM_BUG_ON_PAGE(PageCompound(page), page); 2584 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2585 goto out_unmap; 2586 /* cannot use mapcount: can't collapse if there's a gup pin */ 2587 if (page_count(page) != 1) 2588 goto out_unmap; 2589 if (pte_young(pteval) || PageReferenced(page) || 2590 mmu_notifier_test_young(vma->vm_mm, address)) 2591 referenced = 1; 2592 } 2593 if (referenced) 2594 ret = 1; 2595 out_unmap: 2596 pte_unmap_unlock(pte, ptl); 2597 if (ret) { 2598 node = khugepaged_find_target_node(); 2599 /* collapse_huge_page will return with the mmap_sem released */ 2600 collapse_huge_page(mm, address, hpage, vma, node); 2601 } 2602 out: 2603 return ret; 2604 } 2605 2606 static void collect_mm_slot(struct mm_slot *mm_slot) 2607 { 2608 struct mm_struct *mm = mm_slot->mm; 2609 2610 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2611 2612 if (khugepaged_test_exit(mm)) { 2613 /* free mm_slot */ 2614 hash_del(&mm_slot->hash); 2615 list_del(&mm_slot->mm_node); 2616 2617 /* 2618 * Not strictly needed because the mm exited already. 
2619 * 2620 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2621 */ 2622 2623 /* khugepaged_mm_lock actually not necessary for the below */ 2624 free_mm_slot(mm_slot); 2625 mmdrop(mm); 2626 } 2627 } 2628 2629 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2630 struct page **hpage) 2631 __releases(&khugepaged_mm_lock) 2632 __acquires(&khugepaged_mm_lock) 2633 { 2634 struct mm_slot *mm_slot; 2635 struct mm_struct *mm; 2636 struct vm_area_struct *vma; 2637 int progress = 0; 2638 2639 VM_BUG_ON(!pages); 2640 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2641 2642 if (khugepaged_scan.mm_slot) 2643 mm_slot = khugepaged_scan.mm_slot; 2644 else { 2645 mm_slot = list_entry(khugepaged_scan.mm_head.next, 2646 struct mm_slot, mm_node); 2647 khugepaged_scan.address = 0; 2648 khugepaged_scan.mm_slot = mm_slot; 2649 } 2650 spin_unlock(&khugepaged_mm_lock); 2651 2652 mm = mm_slot->mm; 2653 down_read(&mm->mmap_sem); 2654 if (unlikely(khugepaged_test_exit(mm))) 2655 vma = NULL; 2656 else 2657 vma = find_vma(mm, khugepaged_scan.address); 2658 2659 progress++; 2660 for (; vma; vma = vma->vm_next) { 2661 unsigned long hstart, hend; 2662 2663 cond_resched(); 2664 if (unlikely(khugepaged_test_exit(mm))) { 2665 progress++; 2666 break; 2667 } 2668 if (!hugepage_vma_check(vma)) { 2669 skip: 2670 progress++; 2671 continue; 2672 } 2673 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2674 hend = vma->vm_end & HPAGE_PMD_MASK; 2675 if (hstart >= hend) 2676 goto skip; 2677 if (khugepaged_scan.address > hend) 2678 goto skip; 2679 if (khugepaged_scan.address < hstart) 2680 khugepaged_scan.address = hstart; 2681 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2682 2683 while (khugepaged_scan.address < hend) { 2684 int ret; 2685 cond_resched(); 2686 if (unlikely(khugepaged_test_exit(mm))) 2687 goto breakouterloop; 2688 2689 VM_BUG_ON(khugepaged_scan.address < hstart || 2690 khugepaged_scan.address + HPAGE_PMD_SIZE > 2691 hend); 2692 ret = khugepaged_scan_pmd(mm, vma, 2693 khugepaged_scan.address, 2694 hpage); 2695 /* move to next address */ 2696 khugepaged_scan.address += HPAGE_PMD_SIZE; 2697 progress += HPAGE_PMD_NR; 2698 if (ret) 2699 /* we released mmap_sem so break loop */ 2700 goto breakouterloop_mmap_sem; 2701 if (progress >= pages) 2702 goto breakouterloop; 2703 } 2704 } 2705 breakouterloop: 2706 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2707 breakouterloop_mmap_sem: 2708 2709 spin_lock(&khugepaged_mm_lock); 2710 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2711 /* 2712 * Release the current mm_slot if this mm is about to die, or 2713 * if we scanned all vmas of this mm. 2714 */ 2715 if (khugepaged_test_exit(mm) || !vma) { 2716 /* 2717 * Make sure that if mm_users is reaching zero while 2718 * khugepaged runs here, khugepaged_exit will find 2719 * mm_slot not pointing to the exiting mm. 
2720 */ 2721 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2722 khugepaged_scan.mm_slot = list_entry( 2723 mm_slot->mm_node.next, 2724 struct mm_slot, mm_node); 2725 khugepaged_scan.address = 0; 2726 } else { 2727 khugepaged_scan.mm_slot = NULL; 2728 khugepaged_full_scans++; 2729 } 2730 2731 collect_mm_slot(mm_slot); 2732 } 2733 2734 return progress; 2735 } 2736 2737 static int khugepaged_has_work(void) 2738 { 2739 return !list_empty(&khugepaged_scan.mm_head) && 2740 khugepaged_enabled(); 2741 } 2742 2743 static int khugepaged_wait_event(void) 2744 { 2745 return !list_empty(&khugepaged_scan.mm_head) || 2746 kthread_should_stop(); 2747 } 2748 2749 static void khugepaged_do_scan(void) 2750 { 2751 struct page *hpage = NULL; 2752 unsigned int progress = 0, pass_through_head = 0; 2753 unsigned int pages = khugepaged_pages_to_scan; 2754 bool wait = true; 2755 2756 barrier(); /* write khugepaged_pages_to_scan to local stack */ 2757 2758 while (progress < pages) { 2759 if (!khugepaged_prealloc_page(&hpage, &wait)) 2760 break; 2761 2762 cond_resched(); 2763 2764 if (unlikely(kthread_should_stop() || freezing(current))) 2765 break; 2766 2767 spin_lock(&khugepaged_mm_lock); 2768 if (!khugepaged_scan.mm_slot) 2769 pass_through_head++; 2770 if (khugepaged_has_work() && 2771 pass_through_head < 2) 2772 progress += khugepaged_scan_mm_slot(pages - progress, 2773 &hpage); 2774 else 2775 progress = pages; 2776 spin_unlock(&khugepaged_mm_lock); 2777 } 2778 2779 if (!IS_ERR_OR_NULL(hpage)) 2780 put_page(hpage); 2781 } 2782 2783 static void khugepaged_wait_work(void) 2784 { 2785 try_to_freeze(); 2786 2787 if (khugepaged_has_work()) { 2788 if (!khugepaged_scan_sleep_millisecs) 2789 return; 2790 2791 wait_event_freezable_timeout(khugepaged_wait, 2792 kthread_should_stop(), 2793 msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 2794 return; 2795 } 2796 2797 if (khugepaged_enabled()) 2798 wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); 2799 } 2800 2801 static int khugepaged(void *none) 2802 { 2803 struct mm_slot *mm_slot; 2804 2805 set_freezable(); 2806 set_user_nice(current, 19); 2807 2808 while (!kthread_should_stop()) { 2809 khugepaged_do_scan(); 2810 khugepaged_wait_work(); 2811 } 2812 2813 spin_lock(&khugepaged_mm_lock); 2814 mm_slot = khugepaged_scan.mm_slot; 2815 khugepaged_scan.mm_slot = NULL; 2816 if (mm_slot) 2817 collect_mm_slot(mm_slot); 2818 spin_unlock(&khugepaged_mm_lock); 2819 return 0; 2820 } 2821 2822 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, 2823 unsigned long haddr, pmd_t *pmd) 2824 { 2825 struct mm_struct *mm = vma->vm_mm; 2826 pgtable_t pgtable; 2827 pmd_t _pmd; 2828 int i; 2829 2830 pmdp_clear_flush(vma, haddr, pmd); 2831 /* leave pmd empty until pte is filled */ 2832 2833 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2834 pmd_populate(mm, &_pmd, pgtable); 2835 2836 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2837 pte_t *pte, entry; 2838 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); 2839 entry = pte_mkspecial(entry); 2840 pte = pte_offset_map(&_pmd, haddr); 2841 VM_BUG_ON(!pte_none(*pte)); 2842 set_pte_at(mm, haddr, pte, entry); 2843 pte_unmap(pte); 2844 } 2845 smp_wmb(); /* make pte visible before pmd */ 2846 pmd_populate(mm, pmd, pgtable); 2847 put_huge_zero_page(); 2848 } 2849 2850 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, 2851 pmd_t *pmd) 2852 { 2853 spinlock_t *ptl; 2854 struct page *page; 2855 struct mm_struct *mm = vma->vm_mm; 2856 unsigned long haddr = address & 
HPAGE_PMD_MASK; 2857 unsigned long mmun_start; /* For mmu_notifiers */ 2858 unsigned long mmun_end; /* For mmu_notifiers */ 2859 2860 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); 2861 2862 mmun_start = haddr; 2863 mmun_end = haddr + HPAGE_PMD_SIZE; 2864 again: 2865 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2866 ptl = pmd_lock(mm, pmd); 2867 if (unlikely(!pmd_trans_huge(*pmd))) { 2868 spin_unlock(ptl); 2869 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2870 return; 2871 } 2872 if (is_huge_zero_pmd(*pmd)) { 2873 __split_huge_zero_page_pmd(vma, haddr, pmd); 2874 spin_unlock(ptl); 2875 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2876 return; 2877 } 2878 page = pmd_page(*pmd); 2879 VM_BUG_ON_PAGE(!page_count(page), page); 2880 get_page(page); 2881 spin_unlock(ptl); 2882 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 2883 2884 split_huge_page(page); 2885 2886 put_page(page); 2887 2888 /* 2889 * We don't always have down_write of mmap_sem here: a racing 2890 * do_huge_pmd_wp_page() might have copied-on-write to another 2891 * huge page before our split_huge_page() got the anon_vma lock. 2892 */ 2893 if (unlikely(pmd_trans_huge(*pmd))) 2894 goto again; 2895 } 2896 2897 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 2898 pmd_t *pmd) 2899 { 2900 struct vm_area_struct *vma; 2901 2902 vma = find_vma(mm, address); 2903 BUG_ON(vma == NULL); 2904 split_huge_page_pmd(vma, address, pmd); 2905 } 2906 2907 static void split_huge_page_address(struct mm_struct *mm, 2908 unsigned long address) 2909 { 2910 pmd_t *pmd; 2911 2912 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2913 2914 pmd = mm_find_pmd(mm, address); 2915 if (!pmd) 2916 return; 2917 /* 2918 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2919 * materialize from under us. 2920 */ 2921 split_huge_page_pmd_mm(mm, address, pmd); 2922 } 2923 2924 void __vma_adjust_trans_huge(struct vm_area_struct *vma, 2925 unsigned long start, 2926 unsigned long end, 2927 long adjust_next) 2928 { 2929 /* 2930 * If the new start address isn't hpage aligned and it could 2931 * previously contain an hugepage: check if we need to split 2932 * an huge pmd. 2933 */ 2934 if (start & ~HPAGE_PMD_MASK && 2935 (start & HPAGE_PMD_MASK) >= vma->vm_start && 2936 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2937 split_huge_page_address(vma->vm_mm, start); 2938 2939 /* 2940 * If the new end address isn't hpage aligned and it could 2941 * previously contain an hugepage: check if we need to split 2942 * an huge pmd. 2943 */ 2944 if (end & ~HPAGE_PMD_MASK && 2945 (end & HPAGE_PMD_MASK) >= vma->vm_start && 2946 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2947 split_huge_page_address(vma->vm_mm, end); 2948 2949 /* 2950 * If we're also updating the vma->vm_next->vm_start, if the new 2951 * vm_next->vm_start isn't page aligned and it could previously 2952 * contain an hugepage: check if we need to split an huge pmd. 2953 */ 2954 if (adjust_next > 0) { 2955 struct vm_area_struct *next = vma->vm_next; 2956 unsigned long nstart = next->vm_start; 2957 nstart += adjust_next << PAGE_SHIFT; 2958 if (nstart & ~HPAGE_PMD_MASK && 2959 (nstart & HPAGE_PMD_MASK) >= next->vm_start && 2960 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2961 split_huge_page_address(next->vm_mm, nstart); 2962 } 2963 } 2964