// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
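
/*
 * Illustrative userspace usage (hypothetical sketch, not part of this file):
 * a process can ask for interleaving across nodes 0 and 1 with
 * set_mempolicy(2), or bind an existing mapping with mbind(2), e.g.:
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8 + 1);
 *	mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8 + 1,
 *	      MPOL_MF_MOVE);
 *
 * The exact maxnode value shown is only an example of the common idiom of
 * passing one more than the number of bits in the mask.
 */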

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the inverse of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		walk->action = ACTION_CONTINUE;
		goto unlock;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl)
		return queue_pages_pmd(pmd, ptl, addr, end, walk);

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page || is_zone_device_page(page))
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need to migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect misplaced pages but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
	     !hugetlb_pmd_shared(pte))) {
		if (isolate_hugetlb(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);

	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	tlb_finish_mmu(&tlb);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *next, *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	next = find_vma(vma->vm_mm, vma->vm_end);
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!next || vma->vm_end < next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	MA_STATE(mas, &mm->mm_mt, start, start);
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;

	prev = mas_prev(&mas, 0);
	if (unlikely(!prev))
		mas_set(&mas, start);

	vma = mas_find(&mas, end - 1);
	if (WARN_ON(!vma))
		return 0;

	if (start > vma->vm_start)
		prev = vma;

	for (; vma; vma = mas_next(&mas, end - 1)) {
		unsigned long vmstart = max(start, vma->vm_start);
		unsigned long vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			goto next;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 anon_vma_name(vma));
		if (prev) {
			/* vma_merge() invalidated the mas */
			mas_pause(&mas);
			vma = prev;
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
			/* split_vma() invalidated the mas */
			mas_pause(&mas);
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
			/* split_vma() invalidated the mas */
			mas_pause(&mas);
		}
replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
next:
		prev = vma;
	}

out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}

	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int ret;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	return ret;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because we are about to
			 * drop the mmap_lock, after which only "pol" remains
			 * valid, "vma" is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			mmap_read_unlock(mm);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	struct vm_area_struct *vma;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	vma = find_vma(mm, 0);
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;
}
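
/*
 * Worked example of the pair selection above (illustrative, not taken from
 * the original source): migrating from={0,1} to={1,2}.  The first scan
 * finds <0,1> (dest 1 is still a remaining source, so keep looking), then
 * <1,2> (dest 2 is not a remaining source), so node 1 is emptied into
 * node 2 first.  The second scan then moves node 0 into the now-vacated
 * node 1, preserving the relative layout of the two nodesets.
 */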

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct folio *dst, *src = page_folio(page);
	struct vm_area_struct *vma;
	unsigned long address;
	VMA_ITERATOR(vmi, current->mm, start);
	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

	for_each_vma(vmi, vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
	}

	if (folio_test_hugetlb(src))
		return alloc_huge_page_vma(page_hstate(&src->page),
				vma, address);

	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;

	/*
	 * if !vma, vma_alloc_folio() will use task or system default policy
	 */
	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
			folio_test_large(src));
	return &dst->page;
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = PAGE_ALIGN(len);
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		lru_cache_disable();
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */
static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
		      unsigned long maxnode)
{
	unsigned long nlongs = BITS_TO_LONGS(maxnode);
	int ret;

	if (in_compat_syscall())
		ret = compat_get_bitmap(mask,
					(const compat_ulong_t __user *)nmask,
					maxnode);
	else
		ret = copy_from_user(mask, nmask,
				     nlongs * sizeof(unsigned long));

	if (ret)
		return -EFAULT;

	if (maxnode % BITS_PER_LONG)
		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;

	return 0;
}

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	/*
	 * If the user specified more nodes than supported, just check
	 * that the unsupported part is all zero, one word at a time,
	 * starting at the end.
	 */
	while (maxnode > MAX_NUMNODES) {
		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
		unsigned long t;

		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
			return -EFAULT;

		if (maxnode - bits >= MAX_NUMNODES) {
			maxnode -= bits;
		} else {
			maxnode = MAX_NUMNODES;
			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		}
		if (t)
			return -EINVAL;
	}

	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
	bool compat = in_compat_syscall();

	if (compat)
		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
		maxnode = nr_node_ids;
	}

	if (compat)
		return compat_put_bitmap((compat_ulong_t __user *)mask,
					 nodes_addr(*nodes), maxnode);

	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
{
	*flags = *mode & MPOL_MODE_FLAGS;
	*mode &= ~MPOL_MODE_FLAGS;

	if ((unsigned int)(*mode) >= MPOL_MAX)
		return -EINVAL;
	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	if (*flags & MPOL_F_NUMA_BALANCING) {
		if (*mode != MPOL_BIND)
			return -EINVAL;
		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
	}
	return 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	start = untagged_addr(start);
	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
		unsigned long, home_node, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct mempolicy *new, *old;
	unsigned long vmstart;
	unsigned long vmend;
	unsigned long end;
	int err = -ENOENT;
	VMA_ITERATOR(vmi, mm, start);

	start = untagged_addr(start);
	if (start & ~PAGE_MASK)
		return -EINVAL;
	/*
	 * flags is used for future extension if any.
	 */
	if (flags != 0)
		return -EINVAL;

	/*
	 * Check home_node is online to avoid accessing uninitialized
	 * NODE_DATA.
	 */
	if (home_node >= MAX_NUMNODES || !node_online(home_node))
		return -EINVAL;

	len = PAGE_ALIGN(len);
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	mmap_write_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/*
		 * If any vma in the range got policy other than MPOL_BIND
		 * or MPOL_PREFERRED_MANY we return error. We don't reset
		 * the home node for vmas we already updated before.
		 */
		old = vma_policy(vma);
		if (!old)
			continue;
		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
			err = -EOPNOTSUPP;
			break;
		}
		new = mpol_dup(old);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			break;
		}

		new->home_node = home_node;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);
		err = mbind_range(mm, vmstart, vmend, new);
		mpol_put(new);
		if (err)
			break;
	}
	mmap_write_unlock(mm);
	return err;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_set_mempolicy(lmode, mode_flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}

/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr,
				unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < nr_node_ids)
		return -EINVAL;

	addr = untagged_addr(addr);

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}

bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

	if (is_vm_hugetlb_page(vma) &&
		!hugepage_migration_supported(hstate_vma(vma)))
		return false;

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
			< policy_zone)
		return false;
	return true;
}

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}
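
/*
 * Typical calling pattern (illustrative sketch only, not a definitive
 * contract): a shared policy returned with an elevated refcount must be
 * released with mpol_cond_put() once the allocation decision is made, e.g.:
 *
 *	pol = get_vma_policy(vma, addr);
 *	nid = policy_node(gfp, pol, numa_node_id());
 *	mpol_cond_put(pol);
 */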

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * if policy->nodes has movable memory only,
	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
	 *
	 * policy->nodes is intersected with node_states[N_MEMORY], so if
	 * the following test fails, it implies policy->nodes has movable
	 * memory only.
	 */
	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	int mode = policy->mode;

	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(mode == MPOL_BIND) &&
		apply_policy_zone(policy, gfp_zone(gfp)) &&
		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
		return &policy->nodes;

	if (mode == MPOL_PREFERRED_MANY)
		return &policy->nodes;

	return NULL;
}

/*
 * Return the preferred node id for 'prefer' mempolicy, and return
 * the given id for all other policies.
 *
 * policy_node() is always coupled with policy_nodemask(), which
 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
 */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED) {
		nd = first_node(policy->nodes);
	} else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	if ((policy->mode == MPOL_BIND ||
	     policy->mode == MPOL_PREFERRED_MANY) &&
	    policy->home_node != NUMA_NO_NODE)
		return policy->home_node;

	return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (!in_task())
		return node;

	policy = current->mempolicy;
	if (!policy)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		return first_node(policy->nodes);

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
	case MPOL_PREFERRED_MANY:
	{
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->nodes);
		return z->zone ? zone_to_nid(z->zone) : node;
	}
	case MPOL_LOCAL:
		return node;

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
	nodemask_t nodemask = pol->nodes;
	unsigned int target, nnodes;
	int i;
	int nid;
	/*
	 * The barrier will stabilize the nodemask in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * Between first_node() and next_node(), pol->nodes could be changed
	 * by other threads. So we put pol->nodes on the local stack.
	 */
	barrier();

	nnodes = nodes_weight(nodemask);
	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(nodemask);
	for (i = 0; i < target; i++)
		nid = next_node(nid, nodemask);
	return nid;
}
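
/*
 * Worked example (illustrative, not from the original source): with
 * pol->nodes = {0,2,5} and n = 7, nnodes = 3 and target = 7 % 3 = 1,
 * so the allocation lands on node 2, the second node of the mask.  The
 * same @n always maps to the same node, which is what makes the VMA
 * interleave "static".
 */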

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
 * to the mempolicy's @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;
	int mode;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;
	mode = (*mpol)->mode;

	if (unlikely(mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
			*nodemask = &(*mpol)->nodes;
	}
	return nid;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*mask = mempolicy->nodes;
		break;

	case MPOL_LOCAL:
		init_nodemask_of_node(mask, numa_node_id());
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_in_oom_domain
 *
 * If tsk's mempolicy is "bind", check for intersection between mask and
 * the policy nodemask. Otherwise, return true for all other policies
 * including "interleave", as a tsk with "interleave" policy may have
 * memory allocated from all nodes in system.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2089 */ 2090 bool mempolicy_in_oom_domain(struct task_struct *tsk, 2091 const nodemask_t *mask) 2092 { 2093 struct mempolicy *mempolicy; 2094 bool ret = true; 2095 2096 if (!mask) 2097 return ret; 2098 2099 task_lock(tsk); 2100 mempolicy = tsk->mempolicy; 2101 if (mempolicy && mempolicy->mode == MPOL_BIND) 2102 ret = nodes_intersects(mempolicy->nodes, *mask); 2103 task_unlock(tsk); 2104 2105 return ret; 2106 } 2107 2108 /* Allocate a page in interleaved policy. 2109 Own path because it needs to do special accounting. */ 2110 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2111 unsigned nid) 2112 { 2113 struct page *page; 2114 2115 page = __alloc_pages(gfp, order, nid, NULL); 2116 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 2117 if (!static_branch_likely(&vm_numa_stat_key)) 2118 return page; 2119 if (page && page_to_nid(page) == nid) { 2120 preempt_disable(); 2121 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2122 preempt_enable(); 2123 } 2124 return page; 2125 } 2126 2127 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 2128 int nid, struct mempolicy *pol) 2129 { 2130 struct page *page; 2131 gfp_t preferred_gfp; 2132 2133 /* 2134 * This is a two pass approach. The first pass will only try the 2135 * preferred nodes but skip the direct reclaim and allow the 2136 * allocation to fail, while the second pass will try all the 2137 * nodes in system. 2138 */ 2139 preferred_gfp = gfp | __GFP_NOWARN; 2140 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2141 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 2142 if (!page) 2143 page = __alloc_pages(gfp, order, nid, NULL); 2144 2145 return page; 2146 } 2147 2148 /** 2149 * vma_alloc_folio - Allocate a folio for a VMA. 2150 * @gfp: GFP flags. 2151 * @order: Order of the folio. 2152 * @vma: Pointer to VMA or NULL if not available. 2153 * @addr: Virtual address of the allocation. Must be inside @vma. 2154 * @hugepage: For hugepages try only the preferred node if possible. 2155 * 2156 * Allocate a folio for a specific address in @vma, using the appropriate 2157 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2158 * of the mm_struct of the VMA to prevent it from going away. Should be 2159 * used for all allocations for folios that will be mapped into user space. 2160 * 2161 * Return: The folio on success or NULL if allocation fails. 
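 *
 * Minimal usage sketch (illustrative only; the surrounding fault-handler
 * context is assumed, not taken from this file):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;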
2162 */ 2163 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2164 unsigned long addr, bool hugepage) 2165 { 2166 struct mempolicy *pol; 2167 int node = numa_node_id(); 2168 struct folio *folio; 2169 int preferred_nid; 2170 nodemask_t *nmask; 2171 2172 pol = get_vma_policy(vma, addr); 2173 2174 if (pol->mode == MPOL_INTERLEAVE) { 2175 struct page *page; 2176 unsigned nid; 2177 2178 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 2179 mpol_cond_put(pol); 2180 gfp |= __GFP_COMP; 2181 page = alloc_page_interleave(gfp, order, nid); 2182 if (page && order > 1) 2183 prep_transhuge_page(page); 2184 folio = (struct folio *)page; 2185 goto out; 2186 } 2187 2188 if (pol->mode == MPOL_PREFERRED_MANY) { 2189 struct page *page; 2190 2191 node = policy_node(gfp, pol, node); 2192 gfp |= __GFP_COMP; 2193 page = alloc_pages_preferred_many(gfp, order, node, pol); 2194 mpol_cond_put(pol); 2195 if (page && order > 1) 2196 prep_transhuge_page(page); 2197 folio = (struct folio *)page; 2198 goto out; 2199 } 2200 2201 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 2202 int hpage_node = node; 2203 2204 /* 2205 * For hugepage allocation and non-interleave policy which 2206 * allows the current node (or other explicitly preferred 2207 * node) we only try to allocate from the current/preferred 2208 * node and don't fall back to other nodes, as the cost of 2209 * remote accesses would likely offset THP benefits. 2210 * 2211 * If the policy is interleave or does not allow the current 2212 * node in its nodemask, we allocate the standard way. 2213 */ 2214 if (pol->mode == MPOL_PREFERRED) 2215 hpage_node = first_node(pol->nodes); 2216 2217 nmask = policy_nodemask(gfp, pol); 2218 if (!nmask || node_isset(hpage_node, *nmask)) { 2219 mpol_cond_put(pol); 2220 /* 2221 * First, try to allocate THP only on local node, but 2222 * don't reclaim unnecessarily, just compact. 2223 */ 2224 folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2225 __GFP_NORETRY, order, hpage_node); 2226 2227 /* 2228 * If hugepage allocations are configured to always 2229 * synchronous compact or the vma has been madvised 2230 * to prefer hugepage backing, retry allowing remote 2231 * memory with both reclaim and compact as well. 2232 */ 2233 if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2234 folio = __folio_alloc(gfp, order, hpage_node, 2235 nmask); 2236 2237 goto out; 2238 } 2239 } 2240 2241 nmask = policy_nodemask(gfp, pol); 2242 preferred_nid = policy_node(gfp, pol, node); 2243 folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2244 mpol_cond_put(pol); 2245 out: 2246 return folio; 2247 } 2248 EXPORT_SYMBOL(vma_alloc_folio); 2249 2250 /** 2251 * alloc_pages - Allocate pages. 2252 * @gfp: GFP flags. 2253 * @order: Power of two of number of pages to allocate. 2254 * 2255 * Allocate 1 << @order contiguous pages. The physical address of the 2256 * first page is naturally aligned (eg an order-3 allocation will be aligned 2257 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 2258 * process is honoured when in process context. 2259 * 2260 * Context: Can be called from any context, providing the appropriate GFP 2261 * flags are used. 2262 * Return: The page on success or NULL if allocation fails. 
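 *
 * Minimal usage sketch (illustrative only); an order-2 call returns four
 * contiguous pages:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);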
 */
struct page *alloc_pages(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else if (pol->mode == MPOL_PREFERRED_MANY)
		page = alloc_pages_preferred_many(gfp, order,
				policy_node(gfp, pol, numa_node_id()), pol);
	else
		page = __alloc_pages(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages);

struct folio *folio_alloc(gfp_t gfp, unsigned order)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);

	if (page && order > 1)
		prep_transhuge_page(page);
	return (struct folio *)page;
}
EXPORT_SYMBOL(folio_alloc);

static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	int nodes;
	unsigned long nr_pages_per_node;
	int delta;
	int i;
	unsigned long nr_allocated;
	unsigned long total_allocated = 0;

	nodes = nodes_weight(pol->nodes);
	nr_pages_per_node = nr_pages / nodes;
	delta = nr_pages - nodes * nr_pages_per_node;

	for (i = 0; i < nodes; i++) {
		if (delta) {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node + 1, NULL,
					page_array);
			delta--;
		} else {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node, NULL, page_array);
		}

		page_array += nr_allocated;
		total_allocated += nr_allocated;
	}

	return total_allocated;
}

static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	gfp_t preferred_gfp;
	unsigned long nr_allocated = 0;

	preferred_gfp = gfp | __GFP_NOWARN;
	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);

	nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
					  nr_pages, NULL, page_array);

	if (nr_allocated < nr_pages)
		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
				nr_pages - nr_allocated, NULL,
				page_array + nr_allocated);
	return nr_allocated;
}

/*
 * Allocate pages in bulk while honouring the current task's mempolicy; some
 * callers, such as vmalloc, need both at once.
 *
 * This can speed the allocation up considerably, especially for an
 * interleave policy.
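 *
 * Usage sketch (illustrative; the calling context is hypothetical). The
 * destination array must start out zeroed, as the bulk allocator only fills
 * empty slots:
 *
 *	struct page **pages = kvcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *	unsigned long got;
 *
 *	got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr, pages);
 *	if (got < nr)
 *		... allocate the remainder page by page, or fail ...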
 */
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
		unsigned long nr_pages, struct page **page_array)
{
	struct mempolicy *pol = &default_policy;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	if (pol->mode == MPOL_INTERLEAVE)
		return alloc_pages_bulk_array_interleave(gfp, pol,
							 nr_pages, page_array);

	if (pol->mode == MPOL_PREFERRED_MANY)
		return alloc_pages_bulk_array_preferred_many(gfp,
				numa_node_id(), pol, nr_pages, page_array);

	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
				  policy_nodemask(gfp, pol), nr_pages, NULL,
				  page_array);
}

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset-relative after their cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the one that changes
 * the cpuset's mems), so we need not rebind it for the current task here.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (a->home_node != b->home_node)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		return !!nodes_equal(a->nodes, b->nodes);
	case MPOL_LOCAL:
		return true;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.
Caller holds sp->lock for 2467 * reading or for writing 2468 */ 2469 static struct sp_node * 2470 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 2471 { 2472 struct rb_node *n = sp->root.rb_node; 2473 2474 while (n) { 2475 struct sp_node *p = rb_entry(n, struct sp_node, nd); 2476 2477 if (start >= p->end) 2478 n = n->rb_right; 2479 else if (end <= p->start) 2480 n = n->rb_left; 2481 else 2482 break; 2483 } 2484 if (!n) 2485 return NULL; 2486 for (;;) { 2487 struct sp_node *w = NULL; 2488 struct rb_node *prev = rb_prev(n); 2489 if (!prev) 2490 break; 2491 w = rb_entry(prev, struct sp_node, nd); 2492 if (w->end <= start) 2493 break; 2494 n = prev; 2495 } 2496 return rb_entry(n, struct sp_node, nd); 2497 } 2498 2499 /* 2500 * Insert a new shared policy into the list. Caller holds sp->lock for 2501 * writing. 2502 */ 2503 static void sp_insert(struct shared_policy *sp, struct sp_node *new) 2504 { 2505 struct rb_node **p = &sp->root.rb_node; 2506 struct rb_node *parent = NULL; 2507 struct sp_node *nd; 2508 2509 while (*p) { 2510 parent = *p; 2511 nd = rb_entry(parent, struct sp_node, nd); 2512 if (new->start < nd->start) 2513 p = &(*p)->rb_left; 2514 else if (new->end > nd->end) 2515 p = &(*p)->rb_right; 2516 else 2517 BUG(); 2518 } 2519 rb_link_node(&new->nd, parent, p); 2520 rb_insert_color(&new->nd, &sp->root); 2521 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 2522 new->policy ? new->policy->mode : 0); 2523 } 2524 2525 /* Find shared policy intersecting idx */ 2526 struct mempolicy * 2527 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 2528 { 2529 struct mempolicy *pol = NULL; 2530 struct sp_node *sn; 2531 2532 if (!sp->root.rb_node) 2533 return NULL; 2534 read_lock(&sp->lock); 2535 sn = sp_lookup(sp, idx, idx+1); 2536 if (sn) { 2537 mpol_get(sn->policy); 2538 pol = sn->policy; 2539 } 2540 read_unlock(&sp->lock); 2541 return pol; 2542 } 2543 2544 static void sp_free(struct sp_node *n) 2545 { 2546 mpol_put(n->policy); 2547 kmem_cache_free(sn_cache, n); 2548 } 2549 2550 /** 2551 * mpol_misplaced - check whether current page node is valid in policy 2552 * 2553 * @page: page to be checked 2554 * @vma: vm area where page mapped 2555 * @addr: virtual address where page mapped 2556 * 2557 * Lookup current policy node id for vma,addr and "compare to" page's 2558 * node id. Policy determination "mimics" alloc_page_vma(). 2559 * Called from fault path where we know the vma and faulting address. 2560 * 2561 * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2562 * policy, or a suitable node ID to allocate a replacement page from. 
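 *
 * Example (illustrative): with MPOL_F_MOF set and an MPOL_BIND policy over
 * nodes 0-1, a page resident on node 2 yields the nearest allowed node (0
 * or 1), while a page already on node 0 or 1 yields NUMA_NO_NODE.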
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = NUMA_NO_NODE;
	int ret = NUMA_NO_NODE;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (node_isset(curnid, pol->nodes))
			goto out;
		polnid = first_node(pol->nodes);
		break;

	case MPOL_LOCAL:
		polnid = numa_node_id();
		break;

	case MPOL_BIND:
		/* Optimize placement among multiple nodes via NUMA balancing */
		if (pol->flags & MPOL_F_MORON) {
			if (node_isset(thisnid, pol->nodes))
				break;
			goto out;
		}
		fallthrough;

	case MPOL_PREFERRED_MANY:
		/*
		 * Use the current page if it is in the policy nodemask,
		 * else select the nearest allowed node, if any.
		 * If no allowed nodes, use current [!misplaced].
		 */
		if (node_isset(curnid, pol->nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->nodes);
		polnid = zone_to_nid(z->zone);
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}

/*
 * Drop the (possibly final) reference to task->mempolicy. It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range.
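 * For example (illustrative): replacing [2, 5) in a tree whose only node
 * covers [0, 8) leaves the old policy on [0, 2) and [5, 8) and installs
 * the new policy (if any) on [2, 5).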
*/ 2694 static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 2695 unsigned long end, struct sp_node *new) 2696 { 2697 struct sp_node *n; 2698 struct sp_node *n_new = NULL; 2699 struct mempolicy *mpol_new = NULL; 2700 int ret = 0; 2701 2702 restart: 2703 write_lock(&sp->lock); 2704 n = sp_lookup(sp, start, end); 2705 /* Take care of old policies in the same range. */ 2706 while (n && n->start < end) { 2707 struct rb_node *next = rb_next(&n->nd); 2708 if (n->start >= start) { 2709 if (n->end <= end) 2710 sp_delete(sp, n); 2711 else 2712 n->start = end; 2713 } else { 2714 /* Old policy spanning whole new range. */ 2715 if (n->end > end) { 2716 if (!n_new) 2717 goto alloc_new; 2718 2719 *mpol_new = *n->policy; 2720 atomic_set(&mpol_new->refcnt, 1); 2721 sp_node_init(n_new, end, n->end, mpol_new); 2722 n->end = start; 2723 sp_insert(sp, n_new); 2724 n_new = NULL; 2725 mpol_new = NULL; 2726 break; 2727 } else 2728 n->end = start; 2729 } 2730 if (!next) 2731 break; 2732 n = rb_entry(next, struct sp_node, nd); 2733 } 2734 if (new) 2735 sp_insert(sp, new); 2736 write_unlock(&sp->lock); 2737 ret = 0; 2738 2739 err_out: 2740 if (mpol_new) 2741 mpol_put(mpol_new); 2742 if (n_new) 2743 kmem_cache_free(sn_cache, n_new); 2744 2745 return ret; 2746 2747 alloc_new: 2748 write_unlock(&sp->lock); 2749 ret = -ENOMEM; 2750 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 2751 if (!n_new) 2752 goto err_out; 2753 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2754 if (!mpol_new) 2755 goto err_out; 2756 atomic_set(&mpol_new->refcnt, 1); 2757 goto restart; 2758 } 2759 2760 /** 2761 * mpol_shared_policy_init - initialize shared policy for inode 2762 * @sp: pointer to inode shared policy 2763 * @mpol: struct mempolicy to install 2764 * 2765 * Install non-NULL @mpol in inode's shared policy rb-tree. 2766 * On entry, the current task has a reference on a non-NULL @mpol. 2767 * This must be released on exit. 2768 * This is called at get_inode() calls and we can use GFP_KERNEL. 2769 */ 2770 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 2771 { 2772 int ret; 2773 2774 sp->root = RB_ROOT; /* empty tree == default mempolicy */ 2775 rwlock_init(&sp->lock); 2776 2777 if (mpol) { 2778 struct vm_area_struct pvma; 2779 struct mempolicy *new; 2780 NODEMASK_SCRATCH(scratch); 2781 2782 if (!scratch) 2783 goto put_mpol; 2784 /* contextualize the tmpfs mount point mempolicy */ 2785 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2786 if (IS_ERR(new)) 2787 goto free_scratch; /* no valid nodemask intersection */ 2788 2789 task_lock(current); 2790 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 2791 task_unlock(current); 2792 if (ret) 2793 goto put_new; 2794 2795 /* Create pseudo-vma that contains just the policy */ 2796 vma_init(&pvma, NULL); 2797 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2798 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2799 2800 put_new: 2801 mpol_put(new); /* drop initial ref */ 2802 free_scratch: 2803 NODEMASK_SCRATCH_FREE(scratch); 2804 put_mpol: 2805 mpol_put(mpol); /* drop our incoming ref on sb mpol */ 2806 } 2807 } 2808 2809 int mpol_set_shared_policy(struct shared_policy *info, 2810 struct vm_area_struct *vma, struct mempolicy *npol) 2811 { 2812 int err; 2813 struct sp_node *new = NULL; 2814 unsigned long sz = vma_pages(vma); 2815 2816 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 2817 vma->vm_pgoff, 2818 sz, npol ? npol->mode : -1, 2819 npol ? npol->flags : -1, 2820 npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 2821 2822 if (npol) { 2823 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 2824 if (!new) 2825 return -ENOMEM; 2826 } 2827 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 2828 if (err && new) 2829 sp_free(new); 2830 return err; 2831 } 2832 2833 /* Free a backing policy store on inode delete. */ 2834 void mpol_free_shared_policy(struct shared_policy *p) 2835 { 2836 struct sp_node *n; 2837 struct rb_node *next; 2838 2839 if (!p->root.rb_node) 2840 return; 2841 write_lock(&p->lock); 2842 next = rb_first(&p->root); 2843 while (next) { 2844 n = rb_entry(next, struct sp_node, nd); 2845 next = rb_next(&n->nd); 2846 sp_delete(p, n); 2847 } 2848 write_unlock(&p->lock); 2849 } 2850 2851 #ifdef CONFIG_NUMA_BALANCING 2852 static int __initdata numabalancing_override; 2853 2854 static void __init check_numabalancing_enable(void) 2855 { 2856 bool numabalancing_default = false; 2857 2858 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 2859 numabalancing_default = true; 2860 2861 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2862 if (numabalancing_override) 2863 set_numabalancing_state(numabalancing_override == 1); 2864 2865 if (num_online_nodes() > 1 && !numabalancing_override) { 2866 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2867 numabalancing_default ? "Enabling" : "Disabling"); 2868 set_numabalancing_state(numabalancing_default); 2869 } 2870 } 2871 2872 static int __init setup_numabalancing(char *str) 2873 { 2874 int ret = 0; 2875 if (!str) 2876 goto out; 2877 2878 if (!strcmp(str, "enable")) { 2879 numabalancing_override = 1; 2880 ret = 1; 2881 } else if (!strcmp(str, "disable")) { 2882 numabalancing_override = -1; 2883 ret = 1; 2884 } 2885 out: 2886 if (!ret) 2887 pr_warn("Unable to parse numa_balancing=\n"); 2888 2889 return ret; 2890 } 2891 __setup("numa_balancing=", setup_numabalancing); 2892 #else 2893 static inline void __init check_numabalancing_enable(void) 2894 { 2895 } 2896 #endif /* CONFIG_NUMA_BALANCING */ 2897 2898 /* assumes fs == KERNEL_DS */ 2899 void __init numa_policy_init(void) 2900 { 2901 nodemask_t interleave_nodes; 2902 unsigned long largest = 0; 2903 int nid, prefer = 0; 2904 2905 policy_cache = kmem_cache_create("numa_policy", 2906 sizeof(struct mempolicy), 2907 0, SLAB_PANIC, NULL); 2908 2909 sn_cache = kmem_cache_create("shared_policy_node", 2910 sizeof(struct sp_node), 2911 0, SLAB_PANIC, NULL); 2912 2913 for_each_node(nid) { 2914 preferred_node_policy[nid] = (struct mempolicy) { 2915 .refcnt = ATOMIC_INIT(1), 2916 .mode = MPOL_PREFERRED, 2917 .flags = MPOL_F_MOF | MPOL_F_MORON, 2918 .nodes = nodemask_of_node(nid), 2919 }; 2920 } 2921 2922 /* 2923 * Set interleaving policy for system init. Interleaving is only 2924 * enabled across suitably sized nodes (default is >= 16MB), or 2925 * fall back to the largest node if they're all smaller. 2926 */ 2927 nodes_clear(interleave_nodes); 2928 for_each_node_state(nid, N_MEMORY) { 2929 unsigned long total_pages = node_present_pages(nid); 2930 2931 /* Preserve the largest node */ 2932 if (largest < total_pages) { 2933 largest = total_pages; 2934 prefer = nid; 2935 } 2936 2937 /* Interleave this node? 
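		 * (Illustrative arithmetic: 16 << 20 is 16MB, so with 4KB
		 *  pages a node needs at least 4096 present pages to join
		 *  the boot-time interleave set.)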
*/ 2938 if ((total_pages << PAGE_SHIFT) >= (16 << 20)) 2939 node_set(nid, interleave_nodes); 2940 } 2941 2942 /* All too small, use the largest */ 2943 if (unlikely(nodes_empty(interleave_nodes))) 2944 node_set(prefer, interleave_nodes); 2945 2946 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) 2947 pr_err("%s: interleaving failed\n", __func__); 2948 2949 check_numabalancing_enable(); 2950 } 2951 2952 /* Reset policy of current process to default */ 2953 void numa_default_policy(void) 2954 { 2955 do_set_mempolicy(MPOL_DEFAULT, 0, NULL); 2956 } 2957 2958 /* 2959 * Parse and format mempolicy from/to strings 2960 */ 2961 2962 static const char * const policy_modes[] = 2963 { 2964 [MPOL_DEFAULT] = "default", 2965 [MPOL_PREFERRED] = "prefer", 2966 [MPOL_BIND] = "bind", 2967 [MPOL_INTERLEAVE] = "interleave", 2968 [MPOL_LOCAL] = "local", 2969 [MPOL_PREFERRED_MANY] = "prefer (many)", 2970 }; 2971 2972 2973 #ifdef CONFIG_TMPFS 2974 /** 2975 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. 2976 * @str: string containing mempolicy to parse 2977 * @mpol: pointer to struct mempolicy pointer, returned on success. 2978 * 2979 * Format of input: 2980 * <mode>[=<flags>][:<nodelist>] 2981 * 2982 * Return: %0 on success, else %1 2983 */ 2984 int mpol_parse_str(char *str, struct mempolicy **mpol) 2985 { 2986 struct mempolicy *new = NULL; 2987 unsigned short mode_flags; 2988 nodemask_t nodes; 2989 char *nodelist = strchr(str, ':'); 2990 char *flags = strchr(str, '='); 2991 int err = 1, mode; 2992 2993 if (flags) 2994 *flags++ = '\0'; /* terminate mode string */ 2995 2996 if (nodelist) { 2997 /* NUL-terminate mode or flags string */ 2998 *nodelist++ = '\0'; 2999 if (nodelist_parse(nodelist, nodes)) 3000 goto out; 3001 if (!nodes_subset(nodes, node_states[N_MEMORY])) 3002 goto out; 3003 } else 3004 nodes_clear(nodes); 3005 3006 mode = match_string(policy_modes, MPOL_MAX, str); 3007 if (mode < 0) 3008 goto out; 3009 3010 switch (mode) { 3011 case MPOL_PREFERRED: 3012 /* 3013 * Insist on a nodelist of one node only, although later 3014 * we use first_node(nodes) to grab a single node, so here 3015 * nodelist (or nodes) cannot be empty. 3016 */ 3017 if (nodelist) { 3018 char *rest = nodelist; 3019 while (isdigit(*rest)) 3020 rest++; 3021 if (*rest) 3022 goto out; 3023 if (nodes_empty(nodes)) 3024 goto out; 3025 } 3026 break; 3027 case MPOL_INTERLEAVE: 3028 /* 3029 * Default to online nodes with memory if no nodelist 3030 */ 3031 if (!nodelist) 3032 nodes = node_states[N_MEMORY]; 3033 break; 3034 case MPOL_LOCAL: 3035 /* 3036 * Don't allow a nodelist; mpol_new() checks flags 3037 */ 3038 if (nodelist) 3039 goto out; 3040 break; 3041 case MPOL_DEFAULT: 3042 /* 3043 * Insist on a empty nodelist 3044 */ 3045 if (!nodelist) 3046 err = 0; 3047 goto out; 3048 case MPOL_PREFERRED_MANY: 3049 case MPOL_BIND: 3050 /* 3051 * Insist on a nodelist 3052 */ 3053 if (!nodelist) 3054 goto out; 3055 } 3056 3057 mode_flags = 0; 3058 if (flags) { 3059 /* 3060 * Currently, we only support two mutually exclusive 3061 * mode flags. 3062 */ 3063 if (!strcmp(flags, "static")) 3064 mode_flags |= MPOL_F_STATIC_NODES; 3065 else if (!strcmp(flags, "relative")) 3066 mode_flags |= MPOL_F_RELATIVE_NODES; 3067 else 3068 goto out; 3069 } 3070 3071 new = mpol_new(mode, mode_flags, &nodes); 3072 if (IS_ERR(new)) 3073 goto out; 3074 3075 /* 3076 * Save nodes for mpol_to_str() to show the tmpfs mount options 3077 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 
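	 *
	 * (Illustrative examples of accepted strings, per the format above:
	 *  "interleave:0-3", "bind=static:0,2", "prefer:1", "local" - as
	 *  used in the tmpfs "mpol=" mount option.)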
3078 */ 3079 if (mode != MPOL_PREFERRED) { 3080 new->nodes = nodes; 3081 } else if (nodelist) { 3082 nodes_clear(new->nodes); 3083 node_set(first_node(nodes), new->nodes); 3084 } else { 3085 new->mode = MPOL_LOCAL; 3086 } 3087 3088 /* 3089 * Save nodes for contextualization: this will be used to "clone" 3090 * the mempolicy in a specific context [cpuset] at a later time. 3091 */ 3092 new->w.user_nodemask = nodes; 3093 3094 err = 0; 3095 3096 out: 3097 /* Restore string for error message */ 3098 if (nodelist) 3099 *--nodelist = ':'; 3100 if (flags) 3101 *--flags = '='; 3102 if (!err) 3103 *mpol = new; 3104 return err; 3105 } 3106 #endif /* CONFIG_TMPFS */ 3107 3108 /** 3109 * mpol_to_str - format a mempolicy structure for printing 3110 * @buffer: to contain formatted mempolicy string 3111 * @maxlen: length of @buffer 3112 * @pol: pointer to mempolicy to be formatted 3113 * 3114 * Convert @pol into a string. If @buffer is too short, truncate the string. 3115 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3116 * longest flag, "relative", and to display at least a few node ids. 3117 */ 3118 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 3119 { 3120 char *p = buffer; 3121 nodemask_t nodes = NODE_MASK_NONE; 3122 unsigned short mode = MPOL_DEFAULT; 3123 unsigned short flags = 0; 3124 3125 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3126 mode = pol->mode; 3127 flags = pol->flags; 3128 } 3129 3130 switch (mode) { 3131 case MPOL_DEFAULT: 3132 case MPOL_LOCAL: 3133 break; 3134 case MPOL_PREFERRED: 3135 case MPOL_PREFERRED_MANY: 3136 case MPOL_BIND: 3137 case MPOL_INTERLEAVE: 3138 nodes = pol->nodes; 3139 break; 3140 default: 3141 WARN_ON_ONCE(1); 3142 snprintf(p, maxlen, "unknown"); 3143 return; 3144 } 3145 3146 p += snprintf(p, maxlen, "%s", policy_modes[mode]); 3147 3148 if (flags & MPOL_MODE_FLAGS) { 3149 p += snprintf(p, buffer + maxlen - p, "="); 3150 3151 /* 3152 * Currently, the only defined flags are mutually exclusive 3153 */ 3154 if (flags & MPOL_F_STATIC_NODES) 3155 p += snprintf(p, buffer + maxlen - p, "static"); 3156 else if (flags & MPOL_F_RELATIVE_NODES) 3157 p += snprintf(p, buffer + maxlen - p, "relative"); 3158 } 3159 3160 if (!nodes_empty(nodes)) 3161 p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 3162 nodemask_pr_args(&nodes)); 3163 } 3164
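
/*
 * Illustrative userspace sketch (not part of this file): installing an
 * interleave policy over nodes 0 and 1 with set_mempolicy(2), which the
 * allocation paths above (alloc_pages(), vma_alloc_folio(), ...) then
 * honour for this task. Assumes libnuma's <numaif.h> (or a raw syscall);
 * error handling is minimal.
 *
 *	#include <stdio.h>
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask)))
 *		perror("set_mempolicy");
 */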