/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
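 * The mm list it walks (mm_head) is protected by khugepaged_mm_lock.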
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;
	extern int min_free_kbytes;

	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) &&
	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags))
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes)
		min_free_kbytes = recommended_min;
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		int wakeup;
		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
			err = -ENOMEM;
			goto out;
		}
		mutex_lock(&khugepaged_mutex);
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}
		wakeup = !list_empty(&khugepaged_scan.mm_head);
		mutex_unlock(&khugepaged_mutex);
		if (wakeup)
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else
		/* wakeup to exit */
		wake_up_interruptible(&khugepaged_wait);
out:
	return err;
}

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err = start_khugepaged();
		if (err)
			ret = err;
	}

	if (ret > 0 &&
	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
		      &transparent_hugepage_flags) ||
	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
		      &transparent_hugepage_flags)))
		set_recommended_min_free_kbytes();

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
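 *
 * Like "enabled", this knob lives at
 * /sys/kernel/mm/transparent_hugepage/defrag and accepts the same
 * always/madvise/never values.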
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = strict_strtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

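	/*
	 * No wakeup needed here: khugepaged re-reads
	 * khugepaged_pages_to_scan on its next scan pass.
	 */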
	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = strict_strtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed kobject create\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err) {
		khugepaged_slab_free();
		goto out;
	}

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	set_recommended_min_free_kbytes();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
module_init(hugepage_init)

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

static void prepare_pmd_huge_pte(pgtable_t pgtable,
				 struct mm_struct *mm)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}

static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	int ret = 0;
	pgtable_t pgtable;

	VM_BUG_ON(!PageCompound(page));
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(page);

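	/*
	 * The pmd_none() check below, done under the page_table_lock,
	 * catches a racing fault that already installed a mapping at
	 * this address.
	 */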
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		/*
		 * The spinlocking to take the lru_lock inside
		 * page_add_new_anon_rmap() acts as a full memory
		 * barrier to be sure clear_huge_page writes become
		 * visible after the set_pmd_at() write.
		 */
		page_add_new_anon_rmap(page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		prepare_pmd_huge_pte(pgtable, mm);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		spin_unlock(&mm->page_table_lock);
	}

	return ret;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
#endif

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pte_t *pte;

	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;
		if (unlikely(khugepaged_enter(vma)))
			return VM_FAULT_OOM;
		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					  vma, haddr, numa_node_id(), 0);
		if (unlikely(!page)) {
			count_vm_event(THP_FAULT_FALLBACK);
			goto out;
		}
		count_vm_event(THP_FAULT_ALLOC);
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			goto out;
		}

		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
	}
out:
	/*
	 * Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
	 * materialize from under us from a different thread.
	 */
	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
		return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
	if (unlikely(pmd_trans_huge(*pmd)))
		return 0;
	/*
	 * A regular pmd is established and it can't morph into a huge pmd
	 * from under us anymore at this point because we hold the mmap_sem
	 * read mode and khugepaged takes it in write mode. So now it's
	 * safe to run pte_offset_map().
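	 * (pte_offset_map() on a huge pmd would wrongly treat the huge
	 * page itself as a page table.)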
	 */
	pte = pte_offset_map(pmd, address);
	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	spin_lock(&dst_mm->page_table_lock);
	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(&src_mm->page_table_lock);
		spin_unlock(&dst_mm->page_table_lock);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON(!PageHead(src_page));
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	prepare_pmd_huge_pte(pgtable, dst_mm);

	ret = 0;
out_unlock:
	spin_unlock(&src_mm->page_table_lock);
	spin_unlock(&dst_mm->page_table_lock);
out:
	return ret;
}

/* no "address" argument, so this destroys page coloring on some archs */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_newpage_charge(pages[i], mm,
						       GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON(!PageHead(page));

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = get_pmd_huge_pte(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	mm->nr_ptes++;
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(&mm->page_table_lock);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(&mm->page_table_lock);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	int ret = 0;
	struct page *page, *new_page;
	unsigned long haddr;

	VM_BUG_ON(!vma->anon_vma);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
	haddr = address & HPAGE_PMD_MASK;
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr, numa_node_id(), 0);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
						   pmd, orig_pmd, page, haddr);
		put_page(page);
		goto out;
	}
	count_vm_event(THP_FAULT_ALLOC);

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		put_page(page);
		ret |= VM_FAULT_OOM;
		goto out;
	}

	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	spin_lock(&mm->page_table_lock);
	put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
	} else {
		pmd_t entry;
		VM_BUG_ON(!PageHead(page));
		entry = mk_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		entry = pmd_mkhuge(entry);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache(vma, address, entry);
		page_remove_rmap(page);
		put_page(page);
		ret |= VM_FAULT_WRITE;
	}
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}

struct page *follow_trans_huge_pmd(struct mm_struct *mm,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct page *page = NULL;

	assert_spin_locked(&mm->page_table_lock);

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON(!PageHead(page));
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit will become meaningful and
		 * we'll only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON(!PageCompound(page));
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	int ret = 0;

	spin_lock(&tlb->mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&tlb->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma,
					     pmd);
		} else {
			struct page *page;
			pgtable_t pgtable;
			pgtable = get_pmd_huge_pte(tlb->mm);
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
			page_remove_rmap(page);
			VM_BUG_ON(page_mapcount(page) < 0);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON(!PageHead(page));
			spin_unlock(&tlb->mm->page_table_lock);
			tlb_remove_page(tlb, page);
			pte_free(tlb->mm, pgtable);
			ret = 1;
		}
	} else
		spin_unlock(&tlb->mm->page_table_lock);

	return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		     unsigned long addr, unsigned long end,
		     unsigned char *vec)
{
	int ret = 0;

	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		ret = !pmd_trans_splitting(*pmd);
		spin_unlock(&vma->vm_mm->page_table_lock);
		if (unlikely(!ret))
			wait_split_huge_page(vma->anon_vma, pmd);
		else {
			/*
			 * All logical pages in the range are present
			 * if backed by a huge page.
			 */
			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
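	 * Finding a non-none destination pmd here would indicate a bug,
	 * so it is reported with the WARN_ON below and the move is
	 * refused instead of overwriting live page table state.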
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*old_pmd))) {
		if (pmd_trans_splitting(*old_pmd)) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, old_pmd);
			ret = -1;
		} else {
			pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
			VM_BUG_ON(!pmd_none(*new_pmd));
			set_pmd_at(mm, new_addr, new_pmd, pmd);
			spin_unlock(&mm->page_table_lock);
			ret = 1;
		}
	} else {
		spin_unlock(&mm->page_table_lock);
	}
out:
	return ret;
}

int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		    unsigned long addr, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			pmd_t entry;

			entry = pmdp_get_and_clear(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			set_pmd_at(mm, addr, pmd, entry);
			spin_unlock(&vma->vm_mm->page_table_lock);
			ret = 1;
		}
	} else
		spin_unlock(&vma->vm_mm->page_table_lock);

	return ret;
}

pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto out;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}

static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->mutex to
		 * serialize against split_huge_page*.
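		 * Instead, pmdp_splitting_flush_notify() below sets the
		 * splitting bit: the pmd stays huge while the faulting
		 * paths can still notice the split in progress and wait
		 * for it to complete.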
		 */
		pmdp_splitting_flush_notify(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

static void __split_huge_page_refcount(struct page *page)
{
	int i;
	struct zone *zone = page_zone(page);
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	compound_lock(page);
	/* complete memcg works before adding pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain hwpoison flag of the poisoned tail page:
		 *   fix for the unsuitable process killed on Guest Machine(KVM)
		 *   by the memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmds that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, during the split the
		 * mapcount can't change. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));


		lru_add_page_tail(zone, page, page_tail);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = get_pmd_huge_pte(mm);
		pmd_populate(mm, &_pmd, pgtable);

		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
		     i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		mm->nr_ptes++;
		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has full access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but it
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously.
		 * So instead of doing "pmd_populate(); flush_tlb_range();"
		 * we first mark the current pmd notpresent (atomically
		 * because here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->mutex held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		if (addr == -EFAULT)
			continue;
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(!PageAnon(page));
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		goto out;
	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	page_unlock_anon_vma(anon_vma);
out:
	return ret;
}

#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
		   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

#if 0
static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
#endif

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	/*
	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
	 * true too, verify it here.
	 */
	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

static void release_all_pte_pages(pte_t *pte)
{
	release_pte_pages(pte, pte + HPAGE_PMD_NR);
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, isolated = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else {
				release_pte_pages(pte, _pte);
				goto out;
			}
		}
		if (!pte_present(pteval) || !pte_write(pteval)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			release_pte_pages(pte, _pte);
			goto out;
		}
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			release_pte_pages(pte, _pte);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If there is no mapped pte young, don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (unlikely(!referenced))
		release_all_pte_pages(pte);
	else
		isolated = 1;
out:
	return isolated;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			VM_BUG_ON(page_count(src_page) != 2);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
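			 * (spin_lock() disables preemption, which is all
			 * the non-atomic per-cpu counter updates there
			 * need.)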
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
	new_page = *hpage;
#else
	VM_BUG_ON(*hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is a good idea also to allow greater
	 * scalability.
	 */
	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				      node, __GFP_OTHER_NODE);

	/*
	 * After allocating the hugepage, release the mmap_sem read lock in
	 * preparation for taking it in write mode.
	 */
	up_read(&mm->mmap_sem);
	if (unlikely(!new_page)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return;
	}
#endif

	count_vm_event(THP_COLLAPSE_ALLOC);
	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
#ifdef CONFIG_NUMA
		put_page(new_page);
#endif
		return;
	}

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;

	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	if (!vma->anon_vma || vma->vm_ops)
		goto out;
	if (is_vma_temporary_stack(vma))
		goto out;
	/*
	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
	 * true too, verify it here.
	 */
	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/* pmd can't go away or become huge under us */
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore.
This also removes 1940 * any huge TLB entry from the CPU so we won't allow 1941 * huge and small TLB entries for the same virtual address 1942 * to avoid the risk of CPU bugs in that area. 1943 */ 1944 _pmd = pmdp_clear_flush_notify(vma, address, pmd); 1945 spin_unlock(&mm->page_table_lock); 1946 1947 spin_lock(ptl); 1948 isolated = __collapse_huge_page_isolate(vma, address, pte); 1949 spin_unlock(ptl); 1950 1951 if (unlikely(!isolated)) { 1952 pte_unmap(pte); 1953 spin_lock(&mm->page_table_lock); 1954 BUG_ON(!pmd_none(*pmd)); 1955 set_pmd_at(mm, address, pmd, _pmd); 1956 spin_unlock(&mm->page_table_lock); 1957 anon_vma_unlock(vma->anon_vma); 1958 goto out; 1959 } 1960 1961 /* 1962 * All pages are isolated and locked so anon_vma rmap 1963 * can't run anymore. 1964 */ 1965 anon_vma_unlock(vma->anon_vma); 1966 1967 __collapse_huge_page_copy(pte, new_page, vma, address, ptl); 1968 pte_unmap(pte); 1969 __SetPageUptodate(new_page); 1970 pgtable = pmd_pgtable(_pmd); 1971 VM_BUG_ON(page_count(pgtable) != 1); 1972 VM_BUG_ON(page_mapcount(pgtable) != 0); 1973 1974 _pmd = mk_pmd(new_page, vma->vm_page_prot); 1975 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); 1976 _pmd = pmd_mkhuge(_pmd); 1977 1978 /* 1979 * spin_lock() below is not the equivalent of smp_wmb(), so 1980 * this is needed to prevent the copy_huge_page writes from 1981 * becoming visible after the set_pmd_at() write. 1982 */ 1983 smp_wmb(); 1984 1985 spin_lock(&mm->page_table_lock); 1986 BUG_ON(!pmd_none(*pmd)); 1987 page_add_new_anon_rmap(new_page, vma, address); 1988 set_pmd_at(mm, address, pmd, _pmd); 1989 update_mmu_cache(vma, address, _pmd); 1990 prepare_pmd_huge_pte(pgtable, mm); 1991 mm->nr_ptes--; 1992 spin_unlock(&mm->page_table_lock); 1993 1994 #ifndef CONFIG_NUMA 1995 *hpage = NULL; 1996 #endif 1997 khugepaged_pages_collapsed++; 1998 out_up_write: 1999 up_write(&mm->mmap_sem); 2000 return; 2001 2002 out: 2003 mem_cgroup_uncharge_page(new_page); 2004 #ifdef CONFIG_NUMA 2005 put_page(new_page); 2006 #endif 2007 goto out_up_write; 2008 } 2009 2010 static int khugepaged_scan_pmd(struct mm_struct *mm, 2011 struct vm_area_struct *vma, 2012 unsigned long address, 2013 struct page **hpage) 2014 { 2015 pgd_t *pgd; 2016 pud_t *pud; 2017 pmd_t *pmd; 2018 pte_t *pte, *_pte; 2019 int ret = 0, referenced = 0, none = 0; 2020 struct page *page; 2021 unsigned long _address; 2022 spinlock_t *ptl; 2023 int node = -1; 2024 2025 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 2026 2027 pgd = pgd_offset(mm, address); 2028 if (!pgd_present(*pgd)) 2029 goto out; 2030 2031 pud = pud_offset(pgd, address); 2032 if (!pud_present(*pud)) 2033 goto out; 2034 2035 pmd = pmd_offset(pud, address); 2036 if (!pmd_present(*pmd) || pmd_trans_huge(*pmd)) 2037 goto out; 2038 2039 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2040 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; 2041 _pte++, _address += PAGE_SIZE) { 2042 pte_t pteval = *_pte; 2043 if (pte_none(pteval)) { 2044 if (++none <= khugepaged_max_ptes_none) 2045 continue; 2046 else 2047 goto out_unmap; 2048 } 2049 if (!pte_present(pteval) || !pte_write(pteval)) 2050 goto out_unmap; 2051 page = vm_normal_page(vma, _address, pteval); 2052 if (unlikely(!page)) 2053 goto out_unmap; 2054 /* 2055 * Choose the node of the first page. This could 2056 * be more sophisticated and look at more pages, 2057 * but isn't for now.
2058 */ 2059 if (node == -1) 2060 node = page_to_nid(page); 2061 VM_BUG_ON(PageCompound(page)); 2062 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 2063 goto out_unmap; 2064 /* cannot use mapcount: can't collapse if there's a gup pin */ 2065 if (page_count(page) != 1) 2066 goto out_unmap; 2067 if (pte_young(pteval) || PageReferenced(page) || 2068 mmu_notifier_test_young(vma->vm_mm, address)) 2069 referenced = 1; 2070 } 2071 if (referenced) 2072 ret = 1; 2073 out_unmap: 2074 pte_unmap_unlock(pte, ptl); 2075 if (ret) 2076 /* collapse_huge_page will return with the mmap_sem released */ 2077 collapse_huge_page(mm, address, hpage, vma, node); 2078 out: 2079 return ret; 2080 } 2081 2082 static void collect_mm_slot(struct mm_slot *mm_slot) 2083 { 2084 struct mm_struct *mm = mm_slot->mm; 2085 2086 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2087 2088 if (khugepaged_test_exit(mm)) { 2089 /* free mm_slot */ 2090 hlist_del(&mm_slot->hash); 2091 list_del(&mm_slot->mm_node); 2092 2093 /* 2094 * Not strictly needed because the mm exited already. 2095 * 2096 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 2097 */ 2098 2099 /* khugepaged_mm_lock actually not necessary for the below */ 2100 free_mm_slot(mm_slot); 2101 mmdrop(mm); 2102 } 2103 } 2104 2105 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, 2106 struct page **hpage) 2107 __releases(&khugepaged_mm_lock) 2108 __acquires(&khugepaged_mm_lock) 2109 { 2110 struct mm_slot *mm_slot; 2111 struct mm_struct *mm; 2112 struct vm_area_struct *vma; 2113 int progress = 0; 2114 2115 VM_BUG_ON(!pages); 2116 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); 2117 2118 if (khugepaged_scan.mm_slot) 2119 mm_slot = khugepaged_scan.mm_slot; 2120 else { 2121 mm_slot = list_entry(khugepaged_scan.mm_head.next, 2122 struct mm_slot, mm_node); 2123 khugepaged_scan.address = 0; 2124 khugepaged_scan.mm_slot = mm_slot; 2125 } 2126 spin_unlock(&khugepaged_mm_lock); 2127 2128 mm = mm_slot->mm; 2129 down_read(&mm->mmap_sem); 2130 if (unlikely(khugepaged_test_exit(mm))) 2131 vma = NULL; 2132 else 2133 vma = find_vma(mm, khugepaged_scan.address); 2134 2135 progress++; 2136 for (; vma; vma = vma->vm_next) { 2137 unsigned long hstart, hend; 2138 2139 cond_resched(); 2140 if (unlikely(khugepaged_test_exit(mm))) { 2141 progress++; 2142 break; 2143 } 2144 2145 if ((!(vma->vm_flags & VM_HUGEPAGE) && 2146 !khugepaged_always()) || 2147 (vma->vm_flags & VM_NOHUGEPAGE)) { 2148 skip: 2149 progress++; 2150 continue; 2151 } 2152 if (!vma->anon_vma || vma->vm_ops) 2153 goto skip; 2154 if (is_vma_temporary_stack(vma)) 2155 goto skip; 2156 /* 2157 * If is_pfn_mapping() is true, is_linear_pfn_mapping() 2158 * must be true too; verify it here.
2159 */ 2160 VM_BUG_ON(is_linear_pfn_mapping(vma) || 2161 vma->vm_flags & VM_NO_THP); 2162 2163 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2164 hend = vma->vm_end & HPAGE_PMD_MASK; 2165 if (hstart >= hend) 2166 goto skip; 2167 if (khugepaged_scan.address > hend) 2168 goto skip; 2169 if (khugepaged_scan.address < hstart) 2170 khugepaged_scan.address = hstart; 2171 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 2172 2173 while (khugepaged_scan.address < hend) { 2174 int ret; 2175 cond_resched(); 2176 if (unlikely(khugepaged_test_exit(mm))) 2177 goto breakouterloop; 2178 2179 VM_BUG_ON(khugepaged_scan.address < hstart || 2180 khugepaged_scan.address + HPAGE_PMD_SIZE > 2181 hend); 2182 ret = khugepaged_scan_pmd(mm, vma, 2183 khugepaged_scan.address, 2184 hpage); 2185 /* move to next address */ 2186 khugepaged_scan.address += HPAGE_PMD_SIZE; 2187 progress += HPAGE_PMD_NR; 2188 if (ret) 2189 /* we released mmap_sem so break loop */ 2190 goto breakouterloop_mmap_sem; 2191 if (progress >= pages) 2192 goto breakouterloop; 2193 } 2194 } 2195 breakouterloop: 2196 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ 2197 breakouterloop_mmap_sem: 2198 2199 spin_lock(&khugepaged_mm_lock); 2200 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2201 /* 2202 * Release the current mm_slot if this mm is about to die, or 2203 * if we scanned all vmas of this mm. 2204 */ 2205 if (khugepaged_test_exit(mm) || !vma) { 2206 /* 2207 * Make sure that if mm_users is reaching zero while 2208 * khugepaged runs here, khugepaged_exit will find 2209 * mm_slot not pointing to the exiting mm. 2210 */ 2211 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { 2212 khugepaged_scan.mm_slot = list_entry( 2213 mm_slot->mm_node.next, 2214 struct mm_slot, mm_node); 2215 khugepaged_scan.address = 0; 2216 } else { 2217 khugepaged_scan.mm_slot = NULL; 2218 khugepaged_full_scans++; 2219 } 2220 2221 collect_mm_slot(mm_slot); 2222 } 2223 2224 return progress; 2225 } 2226 2227 static int khugepaged_has_work(void) 2228 { 2229 return !list_empty(&khugepaged_scan.mm_head) && 2230 khugepaged_enabled(); 2231 } 2232 2233 static int khugepaged_wait_event(void) 2234 { 2235 return !list_empty(&khugepaged_scan.mm_head) || 2236 !khugepaged_enabled(); 2237 } 2238 2239 static void khugepaged_do_scan(struct page **hpage) 2240 { 2241 unsigned int progress = 0, pass_through_head = 0; 2242 unsigned int pages = khugepaged_pages_to_scan; 2243 2244 barrier(); /* write khugepaged_pages_to_scan to local stack */ 2245 2246 while (progress < pages) { 2247 cond_resched(); 2248 2249 #ifndef CONFIG_NUMA 2250 if (!*hpage) { 2251 *hpage = alloc_hugepage(khugepaged_defrag()); 2252 if (unlikely(!*hpage)) { 2253 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2254 break; 2255 } 2256 count_vm_event(THP_COLLAPSE_ALLOC); 2257 } 2258 #else 2259 if (IS_ERR(*hpage)) 2260 break; 2261 #endif 2262 2263 if (unlikely(kthread_should_stop() || freezing(current))) 2264 break; 2265 2266 spin_lock(&khugepaged_mm_lock); 2267 if (!khugepaged_scan.mm_slot) 2268 pass_through_head++; 2269 if (khugepaged_has_work() && 2270 pass_through_head < 2) 2271 progress += khugepaged_scan_mm_slot(pages - progress, 2272 hpage); 2273 else 2274 progress = pages; 2275 spin_unlock(&khugepaged_mm_lock); 2276 } 2277 } 2278 2279 static void khugepaged_alloc_sleep(void) 2280 { 2281 wait_event_freezable_timeout(khugepaged_wait, false, 2282 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 2283 } 2284 2285 #ifndef CONFIG_NUMA 2286 static struct page 
*khugepaged_alloc_hugepage(void) 2287 { 2288 struct page *hpage; 2289 2290 do { 2291 hpage = alloc_hugepage(khugepaged_defrag()); 2292 if (!hpage) { 2293 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 2294 khugepaged_alloc_sleep(); 2295 } else 2296 count_vm_event(THP_COLLAPSE_ALLOC); 2297 } while (unlikely(!hpage) && 2298 likely(khugepaged_enabled())); 2299 return hpage; 2300 } 2301 #endif 2302 2303 static void khugepaged_loop(void) 2304 { 2305 struct page *hpage; 2306 2307 #ifdef CONFIG_NUMA 2308 hpage = NULL; 2309 #endif 2310 while (likely(khugepaged_enabled())) { 2311 #ifndef CONFIG_NUMA 2312 hpage = khugepaged_alloc_hugepage(); 2313 if (unlikely(!hpage)) 2314 break; 2315 #else 2316 if (IS_ERR(hpage)) { 2317 khugepaged_alloc_sleep(); 2318 hpage = NULL; 2319 } 2320 #endif 2321 2322 khugepaged_do_scan(&hpage); 2323 #ifndef CONFIG_NUMA 2324 if (hpage) 2325 put_page(hpage); 2326 #endif 2327 try_to_freeze(); 2328 if (unlikely(kthread_should_stop())) 2329 break; 2330 if (khugepaged_has_work()) { 2331 if (!khugepaged_scan_sleep_millisecs) 2332 continue; 2333 wait_event_freezable_timeout(khugepaged_wait, false, 2334 msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); 2335 } else if (khugepaged_enabled()) 2336 wait_event_freezable(khugepaged_wait, 2337 khugepaged_wait_event()); 2338 } 2339 } 2340 2341 static int khugepaged(void *none) 2342 { 2343 struct mm_slot *mm_slot; 2344 2345 set_freezable(); 2346 set_user_nice(current, 19); 2347 2348 /* serialize with start_khugepaged() */ 2349 mutex_lock(&khugepaged_mutex); 2350 2351 for (;;) { 2352 mutex_unlock(&khugepaged_mutex); 2353 VM_BUG_ON(khugepaged_thread != current); 2354 khugepaged_loop(); 2355 VM_BUG_ON(khugepaged_thread != current); 2356 2357 mutex_lock(&khugepaged_mutex); 2358 if (!khugepaged_enabled()) 2359 break; 2360 if (unlikely(kthread_should_stop())) 2361 break; 2362 } 2363 2364 spin_lock(&khugepaged_mm_lock); 2365 mm_slot = khugepaged_scan.mm_slot; 2366 khugepaged_scan.mm_slot = NULL; 2367 if (mm_slot) 2368 collect_mm_slot(mm_slot); 2369 spin_unlock(&khugepaged_mm_lock); 2370 2371 khugepaged_thread = NULL; 2372 mutex_unlock(&khugepaged_mutex); 2373 2374 return 0; 2375 } 2376 2377 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd) 2378 { 2379 struct page *page; 2380 2381 spin_lock(&mm->page_table_lock); 2382 if (unlikely(!pmd_trans_huge(*pmd))) { 2383 spin_unlock(&mm->page_table_lock); 2384 return; 2385 } 2386 page = pmd_page(*pmd); 2387 VM_BUG_ON(!page_count(page)); 2388 get_page(page); 2389 spin_unlock(&mm->page_table_lock); 2390 2391 split_huge_page(page); 2392 2393 put_page(page); 2394 BUG_ON(pmd_trans_huge(*pmd)); 2395 } 2396 2397 static void split_huge_page_address(struct mm_struct *mm, 2398 unsigned long address) 2399 { 2400 pgd_t *pgd; 2401 pud_t *pud; 2402 pmd_t *pmd; 2403 2404 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2405 2406 pgd = pgd_offset(mm, address); 2407 if (!pgd_present(*pgd)) 2408 return; 2409 2410 pud = pud_offset(pgd, address); 2411 if (!pud_present(*pud)) 2412 return; 2413 2414 pmd = pmd_offset(pud, address); 2415 if (!pmd_present(*pmd)) 2416 return; 2417 /* 2418 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2419 * materialize from under us. 2420 */ 2421 split_huge_page_pmd(mm, pmd); 2422 } 2423 2424 void __vma_adjust_trans_huge(struct vm_area_struct *vma, 2425 unsigned long start, 2426 unsigned long end, 2427 long adjust_next) 2428 { 2429 /* 2430 * If the new start address isn't hpage aligned and it could 2431 * previously contain an hugepage: check if we need to split 2432 * an huge pmd. 
2433 */ 2434 if (start & ~HPAGE_PMD_MASK && 2435 (start & HPAGE_PMD_MASK) >= vma->vm_start && 2436 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2437 split_huge_page_address(vma->vm_mm, start); 2438 2439 /* 2440 * If the new end address isn't hpage aligned and it could 2441 * previously contain an hugepage: check if we need to split 2442 * an huge pmd. 2443 */ 2444 if (end & ~HPAGE_PMD_MASK && 2445 (end & HPAGE_PMD_MASK) >= vma->vm_start && 2446 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) 2447 split_huge_page_address(vma->vm_mm, end); 2448 2449 /* 2450 * If we're also updating vma->vm_next->vm_start, and the new 2451 * vm_next->vm_start isn't hpage aligned and it could previously 2452 * contain an hugepage: check if we need to split an huge pmd. 2453 */ 2454 if (adjust_next > 0) { 2455 struct vm_area_struct *next = vma->vm_next; 2456 unsigned long nstart = next->vm_start; 2457 nstart += adjust_next << PAGE_SHIFT; 2458 if (nstart & ~HPAGE_PMD_MASK && 2459 (nstart & HPAGE_PMD_MASK) >= next->vm_start && 2460 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) 2461 split_huge_page_address(next->vm_mm, nstart); 2462 } 2463 } 2464
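/*
 * Illustrative appendix (not part of the kernel source above): how a
 * userspace process makes an anonymous range eligible for the collapse
 * path implemented in khugepaged_scan_mm_slot()/collapse_huge_page().
 * Unless transparent_hugepage is set to "always", khugepaged only scans
 * vmas with VM_HUGEPAGE set, which applications request with
 * madvise(MADV_HUGEPAGE).  This is a minimal sketch; the buffer size and
 * the suggestion to watch AnonHugePages in /proc/self/smaps are
 * assumptions for the example, not something defined by this file.
 */
#if 0	/* userspace example, never built as part of this file */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define EXAMPLE_SIZE	(16UL << 20)	/* 16MB: several pmd-sized ranges */

int main(void)
{
	void *buf;

	/* 2MB-align the buffer so whole pmd ranges fall inside the vma */
	if (posix_memalign(&buf, 2UL << 20, EXAMPLE_SIZE))
		return 1;

	/* Sets VM_HUGEPAGE, making the vma a khugepaged collapse candidate */
	if (madvise(buf, EXAMPLE_SIZE, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");

	/* Fault in every small page; khugepaged may later collapse them */
	memset(buf, 0, EXAMPLE_SIZE);

	/* AnonHugePages in /proc/self/smaps grows once ranges are collapsed */
	getchar();

	free(buf);
	return 0;
}
#endif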
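/*
 * Illustrative appendix (not part of the kernel source above): the
 * khugepaged_* tunables and counters used in this file are exported
 * through sysfs when CONFIG_SYSFS is enabled.  A hedged userspace sketch
 * for polling them follows; the path below matches the usual layout
 * (/sys/kernel/mm/transparent_hugepage/khugepaged/), but treat the exact
 * file names as an assumption to verify on the running kernel.
 */
#if 0	/* userspace example, never built as part of this file */
#include <stdio.h>

/* Read one khugepaged sysfs value; returns 0 if the file is unreadable */
static unsigned long khugepaged_sysfs_read(const char *name)
{
	char path[256];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	/* counters bumped by khugepaged_scan_mm_slot()/collapse_huge_page() */
	printf("full_scans:      %lu\n", khugepaged_sysfs_read("full_scans"));
	printf("pages_collapsed: %lu\n", khugepaged_sysfs_read("pages_collapsed"));
	/* scan rate knobs mirrored by khugepaged_pages_to_scan and friends */
	printf("pages_to_scan:   %lu\n", khugepaged_sysfs_read("pages_to_scan"));
	printf("scan_sleep_ms:   %lu\n", khugepaged_sysfs_read("scan_sleep_millisecs"));
	return 0;
}
#endif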