// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated from.
 *
 * The following policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA-based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When the process
 * policy is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use the default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * The same goes for GFP_DMA allocations.
 *
 * For shmem/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
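/*
 * Illustrative userspace sketch (not part of this file): a task can request
 * one of the policies above for its future allocations with set_mempolicy(2),
 * e.g. interleaving across nodes 0 and 1:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  sizeof(nodemask) * 8) < 0)
 *		perror("set_mempolicy");
 *
 * mbind(2) applies the same modes to an address range instead, creating a
 * per-VMA policy that, as noted above, takes priority over the process
 * policy at page-fault time.
 */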
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest online node
 * by distance.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
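/*
 * Illustrative example (assumed topology, not derived from this file): on a
 * system where node 2 is possible but currently offline,
 * numa_map_to_online_node(2) returns whichever online node has the smallest
 * node_distance() to node 2, while numa_map_to_online_node(0) returns 0
 * unchanged and numa_map_to_online_node(NUMA_NO_NODE) returns NUMA_NO_NODE.
 */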
struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
260 */ 261 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, 262 nodemask_t *nodes) 263 { 264 struct mempolicy *policy; 265 266 pr_debug("setting mode %d flags %d nodes[0] %lx\n", 267 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); 268 269 if (mode == MPOL_DEFAULT) { 270 if (nodes && !nodes_empty(*nodes)) 271 return ERR_PTR(-EINVAL); 272 return NULL; 273 } 274 VM_BUG_ON(!nodes); 275 276 /* 277 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or 278 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). 279 * All other modes require a valid pointer to a non-empty nodemask. 280 */ 281 if (mode == MPOL_PREFERRED) { 282 if (nodes_empty(*nodes)) { 283 if (((flags & MPOL_F_STATIC_NODES) || 284 (flags & MPOL_F_RELATIVE_NODES))) 285 return ERR_PTR(-EINVAL); 286 287 mode = MPOL_LOCAL; 288 } 289 } else if (mode == MPOL_LOCAL) { 290 if (!nodes_empty(*nodes) || 291 (flags & MPOL_F_STATIC_NODES) || 292 (flags & MPOL_F_RELATIVE_NODES)) 293 return ERR_PTR(-EINVAL); 294 } else if (nodes_empty(*nodes)) 295 return ERR_PTR(-EINVAL); 296 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); 297 if (!policy) 298 return ERR_PTR(-ENOMEM); 299 atomic_set(&policy->refcnt, 1); 300 policy->mode = mode; 301 policy->flags = flags; 302 policy->home_node = NUMA_NO_NODE; 303 304 return policy; 305 } 306 307 /* Slow path of a mpol destructor. */ 308 void __mpol_put(struct mempolicy *p) 309 { 310 if (!atomic_dec_and_test(&p->refcnt)) 311 return; 312 kmem_cache_free(policy_cache, p); 313 } 314 315 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) 316 { 317 } 318 319 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) 320 { 321 nodemask_t tmp; 322 323 if (pol->flags & MPOL_F_STATIC_NODES) 324 nodes_and(tmp, pol->w.user_nodemask, *nodes); 325 else if (pol->flags & MPOL_F_RELATIVE_NODES) 326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); 327 else { 328 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, 329 *nodes); 330 pol->w.cpuset_mems_allowed = *nodes; 331 } 332 333 if (nodes_empty(tmp)) 334 tmp = *nodes; 335 336 pol->nodes = tmp; 337 } 338 339 static void mpol_rebind_preferred(struct mempolicy *pol, 340 const nodemask_t *nodes) 341 { 342 pol->w.cpuset_mems_allowed = *nodes; 343 } 344 345 /* 346 * mpol_rebind_policy - Migrate a policy to a different set of nodes 347 * 348 * Per-vma policies are protected by mmap_lock. Allocations using per-task 349 * policies are protected by task->mems_allowed_seq to prevent a premature 350 * OOM/allocation failure due to parallel nodemask modification. 351 */ 352 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) 353 { 354 if (!pol || pol->mode == MPOL_LOCAL) 355 return; 356 if (!mpol_store_user_nodemask(pol) && 357 nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 358 return; 359 360 mpol_ops[pol->mode].rebind(pol, newmask); 361 } 362 363 /* 364 * Wrapper for mpol_rebind_policy() that just requires task 365 * pointer, and updates task mempolicy. 366 * 367 * Called with task's alloc_lock held. 368 */ 369 370 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) 371 { 372 mpol_rebind_policy(tsk->mempolicy, new); 373 } 374 375 /* 376 * Rebind each vma in mm to new nodemask. 377 * 378 * Call holding a reference to mm. Takes mm->mmap_lock during call. 
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		vma_start_write(vma);
		mpol_rebind_policy(vma->vm_policy, new);
	}
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the folio's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_folio_required(struct folio *folio,
					struct queue_pages *qp)
{
	int nid = folio_nid(folio);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_folios_pmd() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or a
 *     special page is encountered, e.g. the huge zero page.
 * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - a migration entry was encountered, or only MPOL_MF_STRICT was
 *        specified and an existing folio was already on a node that does
 *        not follow the policy.
 */
static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct folio *folio;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	folio = pfn_folio(pmd_pfn(*pmd));
	if (is_huge_zero_page(&folio->page)) {
		walk->action = ACTION_CONTINUE;
		goto unlock;
	}
	if (!queue_folio_required(folio, qp))
		goto unlock;

	flags = qp->flags;
	/* go to folio migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_folio_add(folio, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Scan through the page range, checking whether the pages meet the required
 * conditions, and move them to the pagelist if they do.
 *
 * queue_folios_pte_range() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or a
 *     special page is encountered, e.g. the zero page.
 * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
 *        on a node that does not follow the policy.
503 */ 504 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, 505 unsigned long end, struct mm_walk *walk) 506 { 507 struct vm_area_struct *vma = walk->vma; 508 struct folio *folio; 509 struct queue_pages *qp = walk->private; 510 unsigned long flags = qp->flags; 511 bool has_unmovable = false; 512 pte_t *pte, *mapped_pte; 513 pte_t ptent; 514 spinlock_t *ptl; 515 516 ptl = pmd_trans_huge_lock(pmd, vma); 517 if (ptl) 518 return queue_folios_pmd(pmd, ptl, addr, end, walk); 519 520 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 521 if (!pte) { 522 walk->action = ACTION_AGAIN; 523 return 0; 524 } 525 for (; addr != end; pte++, addr += PAGE_SIZE) { 526 ptent = ptep_get(pte); 527 if (!pte_present(ptent)) 528 continue; 529 folio = vm_normal_folio(vma, addr, ptent); 530 if (!folio || folio_is_zone_device(folio)) 531 continue; 532 /* 533 * vm_normal_folio() filters out zero pages, but there might 534 * still be reserved folios to skip, perhaps in a VDSO. 535 */ 536 if (folio_test_reserved(folio)) 537 continue; 538 if (!queue_folio_required(folio, qp)) 539 continue; 540 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 541 /* MPOL_MF_STRICT must be specified if we get here */ 542 if (!vma_migratable(vma)) { 543 has_unmovable = true; 544 break; 545 } 546 547 /* 548 * Do not abort immediately since there may be 549 * temporary off LRU pages in the range. Still 550 * need migrate other LRU pages. 551 */ 552 if (migrate_folio_add(folio, qp->pagelist, flags)) 553 has_unmovable = true; 554 } else 555 break; 556 } 557 pte_unmap_unlock(mapped_pte, ptl); 558 cond_resched(); 559 560 if (has_unmovable) 561 return 1; 562 563 return addr != end ? -EIO : 0; 564 } 565 566 static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, 567 unsigned long addr, unsigned long end, 568 struct mm_walk *walk) 569 { 570 int ret = 0; 571 #ifdef CONFIG_HUGETLB_PAGE 572 struct queue_pages *qp = walk->private; 573 unsigned long flags = (qp->flags & MPOL_MF_VALID); 574 struct folio *folio; 575 spinlock_t *ptl; 576 pte_t entry; 577 578 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 579 entry = huge_ptep_get(pte); 580 if (!pte_present(entry)) 581 goto unlock; 582 folio = pfn_folio(pte_pfn(entry)); 583 if (!queue_folio_required(folio, qp)) 584 goto unlock; 585 586 if (flags == MPOL_MF_STRICT) { 587 /* 588 * STRICT alone means only detecting misplaced folio and no 589 * need to further check other vma. 590 */ 591 ret = -EIO; 592 goto unlock; 593 } 594 595 if (!vma_migratable(walk->vma)) { 596 /* 597 * Must be STRICT with MOVE*, otherwise .test_walk() have 598 * stopped walking current vma. 599 * Detecting misplaced folio but allow migrating folios which 600 * have been queued. 601 */ 602 ret = 1; 603 goto unlock; 604 } 605 606 /* 607 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it 608 * is shared it is likely not worth migrating. 609 * 610 * To check if the folio is shared, ideally we want to make sure 611 * every page is mapped to the same process. Doing that is very 612 * expensive, so check the estimated mapcount of the folio instead. 613 */ 614 if (flags & (MPOL_MF_MOVE_ALL) || 615 (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 && 616 !hugetlb_pmd_shared(pte))) { 617 if (!isolate_hugetlb(folio, qp->pagelist) && 618 (flags & MPOL_MF_STRICT)) 619 /* 620 * Failed to isolate folio but allow migrating pages 621 * which have been queued. 
622 */ 623 ret = 1; 624 } 625 unlock: 626 spin_unlock(ptl); 627 #else 628 BUG(); 629 #endif 630 return ret; 631 } 632 633 #ifdef CONFIG_NUMA_BALANCING 634 /* 635 * This is used to mark a range of virtual addresses to be inaccessible. 636 * These are later cleared by a NUMA hinting fault. Depending on these 637 * faults, pages may be migrated for better NUMA placement. 638 * 639 * This is assuming that NUMA faults are handled using PROT_NONE. If 640 * an architecture makes a different choice, it will need further 641 * changes to the core. 642 */ 643 unsigned long change_prot_numa(struct vm_area_struct *vma, 644 unsigned long addr, unsigned long end) 645 { 646 struct mmu_gather tlb; 647 long nr_updated; 648 649 tlb_gather_mmu(&tlb, vma->vm_mm); 650 651 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); 652 if (nr_updated > 0) 653 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); 654 655 tlb_finish_mmu(&tlb); 656 657 return nr_updated; 658 } 659 #else 660 static unsigned long change_prot_numa(struct vm_area_struct *vma, 661 unsigned long addr, unsigned long end) 662 { 663 return 0; 664 } 665 #endif /* CONFIG_NUMA_BALANCING */ 666 667 static int queue_pages_test_walk(unsigned long start, unsigned long end, 668 struct mm_walk *walk) 669 { 670 struct vm_area_struct *next, *vma = walk->vma; 671 struct queue_pages *qp = walk->private; 672 unsigned long endvma = vma->vm_end; 673 unsigned long flags = qp->flags; 674 675 /* range check first */ 676 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); 677 678 if (!qp->first) { 679 qp->first = vma; 680 if (!(flags & MPOL_MF_DISCONTIG_OK) && 681 (qp->start < vma->vm_start)) 682 /* hole at head side of range */ 683 return -EFAULT; 684 } 685 next = find_vma(vma->vm_mm, vma->vm_end); 686 if (!(flags & MPOL_MF_DISCONTIG_OK) && 687 ((vma->vm_end < qp->end) && 688 (!next || vma->vm_end < next->vm_start))) 689 /* hole at middle or tail of range */ 690 return -EFAULT; 691 692 /* 693 * Need check MPOL_MF_STRICT to return -EIO if possible 694 * regardless of vma_migratable 695 */ 696 if (!vma_migratable(vma) && 697 !(flags & MPOL_MF_STRICT)) 698 return 1; 699 700 if (endvma > end) 701 endvma = end; 702 703 if (flags & MPOL_MF_LAZY) { 704 /* Similar to task_numa_work, skip inaccessible VMAs */ 705 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && 706 !(vma->vm_flags & VM_MIXEDMAP)) 707 change_prot_numa(vma, start, endvma); 708 return 1; 709 } 710 711 /* queue pages from current vma */ 712 if (flags & MPOL_MF_VALID) 713 return 0; 714 return 1; 715 } 716 717 static const struct mm_walk_ops queue_pages_walk_ops = { 718 .hugetlb_entry = queue_folios_hugetlb, 719 .pmd_entry = queue_folios_pte_range, 720 .test_walk = queue_pages_test_walk, 721 }; 722 723 /* 724 * Walk through page tables and collect pages to be migrated. 725 * 726 * If pages found in a given range are on a set of nodes (determined by 727 * @nodes and @flags,) it's isolated and queued to the pagelist which is 728 * passed via @private. 729 * 730 * queue_pages_range() has three possible return values: 731 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were 732 * specified. 733 * 0 - queue pages successfully or no misplaced page. 734 * errno - i.e. 
misplaced pages with MPOL_MF_STRICT specified (-EIO) or 735 * memory range specified by nodemask and maxnode points outside 736 * your accessible address space (-EFAULT) 737 */ 738 static int 739 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 740 nodemask_t *nodes, unsigned long flags, 741 struct list_head *pagelist) 742 { 743 int err; 744 struct queue_pages qp = { 745 .pagelist = pagelist, 746 .flags = flags, 747 .nmask = nodes, 748 .start = start, 749 .end = end, 750 .first = NULL, 751 }; 752 753 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); 754 755 if (!qp.first) 756 /* whole range in hole */ 757 err = -EFAULT; 758 759 return err; 760 } 761 762 /* 763 * Apply policy to a single VMA 764 * This must be called with the mmap_lock held for writing. 765 */ 766 static int vma_replace_policy(struct vm_area_struct *vma, 767 struct mempolicy *pol) 768 { 769 int err; 770 struct mempolicy *old; 771 struct mempolicy *new; 772 773 vma_assert_write_locked(vma); 774 775 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", 776 vma->vm_start, vma->vm_end, vma->vm_pgoff, 777 vma->vm_ops, vma->vm_file, 778 vma->vm_ops ? vma->vm_ops->set_policy : NULL); 779 780 new = mpol_dup(pol); 781 if (IS_ERR(new)) 782 return PTR_ERR(new); 783 784 if (vma->vm_ops && vma->vm_ops->set_policy) { 785 err = vma->vm_ops->set_policy(vma, new); 786 if (err) 787 goto err_out; 788 } 789 790 old = vma->vm_policy; 791 vma->vm_policy = new; /* protected by mmap_lock */ 792 mpol_put(old); 793 794 return 0; 795 err_out: 796 mpol_put(new); 797 return err; 798 } 799 800 /* Split or merge the VMA (if required) and apply the new policy */ 801 static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, 802 struct vm_area_struct **prev, unsigned long start, 803 unsigned long end, struct mempolicy *new_pol) 804 { 805 struct vm_area_struct *merged; 806 unsigned long vmstart, vmend; 807 pgoff_t pgoff; 808 int err; 809 810 vmend = min(end, vma->vm_end); 811 if (start > vma->vm_start) { 812 *prev = vma; 813 vmstart = start; 814 } else { 815 vmstart = vma->vm_start; 816 } 817 818 if (mpol_equal(vma_policy(vma), new_pol)) { 819 *prev = vma; 820 return 0; 821 } 822 823 pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); 824 merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags, 825 vma->anon_vma, vma->vm_file, pgoff, new_pol, 826 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 827 if (merged) { 828 *prev = merged; 829 return vma_replace_policy(merged, new_pol); 830 } 831 832 if (vma->vm_start != vmstart) { 833 err = split_vma(vmi, vma, vmstart, 1); 834 if (err) 835 return err; 836 } 837 838 if (vma->vm_end != vmend) { 839 err = split_vma(vmi, vma, vmend, 0); 840 if (err) 841 return err; 842 } 843 844 *prev = vma; 845 return vma_replace_policy(vma, new_pol); 846 } 847 848 /* Set the process memory policy */ 849 static long do_set_mempolicy(unsigned short mode, unsigned short flags, 850 nodemask_t *nodes) 851 { 852 struct mempolicy *new, *old; 853 NODEMASK_SCRATCH(scratch); 854 int ret; 855 856 if (!scratch) 857 return -ENOMEM; 858 859 new = mpol_new(mode, flags, nodes); 860 if (IS_ERR(new)) { 861 ret = PTR_ERR(new); 862 goto out; 863 } 864 865 task_lock(current); 866 ret = mpol_set_nodemask(new, nodes, scratch); 867 if (ret) { 868 task_unlock(current); 869 mpol_put(new); 870 goto out; 871 } 872 873 old = current->mempolicy; 874 current->mempolicy = new; 875 if (new && new->mode == MPOL_INTERLEAVE) 876 current->il_prev = MAX_NUMNODES-1; 
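	/*
	 * Starting il_prev at MAX_NUMNODES - 1 makes the first call to
	 * interleave_nodes() wrap around via next_node_in() and hand out
	 * the first node of the new interleave mask.
	 */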
877 task_unlock(current); 878 mpol_put(old); 879 ret = 0; 880 out: 881 NODEMASK_SCRATCH_FREE(scratch); 882 return ret; 883 } 884 885 /* 886 * Return nodemask for policy for get_mempolicy() query 887 * 888 * Called with task's alloc_lock held 889 */ 890 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) 891 { 892 nodes_clear(*nodes); 893 if (p == &default_policy) 894 return; 895 896 switch (p->mode) { 897 case MPOL_BIND: 898 case MPOL_INTERLEAVE: 899 case MPOL_PREFERRED: 900 case MPOL_PREFERRED_MANY: 901 *nodes = p->nodes; 902 break; 903 case MPOL_LOCAL: 904 /* return empty node mask for local allocation */ 905 break; 906 default: 907 BUG(); 908 } 909 } 910 911 static int lookup_node(struct mm_struct *mm, unsigned long addr) 912 { 913 struct page *p = NULL; 914 int ret; 915 916 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); 917 if (ret > 0) { 918 ret = page_to_nid(p); 919 put_page(p); 920 } 921 return ret; 922 } 923 924 /* Retrieve NUMA policy */ 925 static long do_get_mempolicy(int *policy, nodemask_t *nmask, 926 unsigned long addr, unsigned long flags) 927 { 928 int err; 929 struct mm_struct *mm = current->mm; 930 struct vm_area_struct *vma = NULL; 931 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; 932 933 if (flags & 934 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) 935 return -EINVAL; 936 937 if (flags & MPOL_F_MEMS_ALLOWED) { 938 if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) 939 return -EINVAL; 940 *policy = 0; /* just so it's initialized */ 941 task_lock(current); 942 *nmask = cpuset_current_mems_allowed; 943 task_unlock(current); 944 return 0; 945 } 946 947 if (flags & MPOL_F_ADDR) { 948 /* 949 * Do NOT fall back to task policy if the 950 * vma/shared policy at addr is NULL. We 951 * want to return MPOL_DEFAULT in this case. 952 */ 953 mmap_read_lock(mm); 954 vma = vma_lookup(mm, addr); 955 if (!vma) { 956 mmap_read_unlock(mm); 957 return -EFAULT; 958 } 959 if (vma->vm_ops && vma->vm_ops->get_policy) 960 pol = vma->vm_ops->get_policy(vma, addr); 961 else 962 pol = vma->vm_policy; 963 } else if (addr) 964 return -EINVAL; 965 966 if (!pol) 967 pol = &default_policy; /* indicates default behavior */ 968 969 if (flags & MPOL_F_NODE) { 970 if (flags & MPOL_F_ADDR) { 971 /* 972 * Take a refcount on the mpol, because we are about to 973 * drop the mmap_lock, after which only "pol" remains 974 * valid, "vma" is stale. 975 */ 976 pol_refcount = pol; 977 vma = NULL; 978 mpol_get(pol); 979 mmap_read_unlock(mm); 980 err = lookup_node(mm, addr); 981 if (err < 0) 982 goto out; 983 *policy = err; 984 } else if (pol == current->mempolicy && 985 pol->mode == MPOL_INTERLEAVE) { 986 *policy = next_node_in(current->il_prev, pol->nodes); 987 } else { 988 err = -EINVAL; 989 goto out; 990 } 991 } else { 992 *policy = pol == &default_policy ? MPOL_DEFAULT : 993 pol->mode; 994 /* 995 * Internal mempolicy flags must be masked off before exposing 996 * the policy to userspace. 
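		 * Only the user-visible mode flags (e.g. MPOL_F_STATIC_NODES
		 * and MPOL_F_RELATIVE_NODES) are ORed back into the returned
		 * value below.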
997 */ 998 *policy |= (pol->flags & MPOL_MODE_FLAGS); 999 } 1000 1001 err = 0; 1002 if (nmask) { 1003 if (mpol_store_user_nodemask(pol)) { 1004 *nmask = pol->w.user_nodemask; 1005 } else { 1006 task_lock(current); 1007 get_policy_nodemask(pol, nmask); 1008 task_unlock(current); 1009 } 1010 } 1011 1012 out: 1013 mpol_cond_put(pol); 1014 if (vma) 1015 mmap_read_unlock(mm); 1016 if (pol_refcount) 1017 mpol_put(pol_refcount); 1018 return err; 1019 } 1020 1021 #ifdef CONFIG_MIGRATION 1022 static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1023 unsigned long flags) 1024 { 1025 /* 1026 * We try to migrate only unshared folios. If it is shared it 1027 * is likely not worth migrating. 1028 * 1029 * To check if the folio is shared, ideally we want to make sure 1030 * every page is mapped to the same process. Doing that is very 1031 * expensive, so check the estimated mapcount of the folio instead. 1032 */ 1033 if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { 1034 if (folio_isolate_lru(folio)) { 1035 list_add_tail(&folio->lru, foliolist); 1036 node_stat_mod_folio(folio, 1037 NR_ISOLATED_ANON + folio_is_file_lru(folio), 1038 folio_nr_pages(folio)); 1039 } else if (flags & MPOL_MF_STRICT) { 1040 /* 1041 * Non-movable folio may reach here. And, there may be 1042 * temporary off LRU folios or non-LRU movable folios. 1043 * Treat them as unmovable folios since they can't be 1044 * isolated, so they can't be moved at the moment. It 1045 * should return -EIO for this case too. 1046 */ 1047 return -EIO; 1048 } 1049 } 1050 1051 return 0; 1052 } 1053 1054 /* 1055 * Migrate pages from one node to a target node. 1056 * Returns error or the number of pages not migrated. 1057 */ 1058 static int migrate_to_node(struct mm_struct *mm, int source, int dest, 1059 int flags) 1060 { 1061 nodemask_t nmask; 1062 struct vm_area_struct *vma; 1063 LIST_HEAD(pagelist); 1064 int err = 0; 1065 struct migration_target_control mtc = { 1066 .nid = dest, 1067 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 1068 }; 1069 1070 nodes_clear(nmask); 1071 node_set(source, nmask); 1072 1073 /* 1074 * This does not "check" the range but isolates all pages that 1075 * need migration. Between passing in the full user address 1076 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 1077 */ 1078 vma = find_vma(mm, 0); 1079 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); 1080 queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, 1081 flags | MPOL_MF_DISCONTIG_OK, &pagelist); 1082 1083 if (!list_empty(&pagelist)) { 1084 err = migrate_pages(&pagelist, alloc_migration_target, NULL, 1085 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); 1086 if (err) 1087 putback_movable_pages(&pagelist); 1088 } 1089 1090 return err; 1091 } 1092 1093 /* 1094 * Move pages between the two nodesets so as to preserve the physical 1095 * layout as much as possible. 1096 * 1097 * Returns the number of page that could not be moved. 1098 */ 1099 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1100 const nodemask_t *to, int flags) 1101 { 1102 int busy = 0; 1103 int err = 0; 1104 nodemask_t tmp; 1105 1106 lru_cache_disable(); 1107 1108 mmap_read_lock(mm); 1109 1110 /* 1111 * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 1112 * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 1113 * bit in 'tmp', and return that <source, dest> pair for migration. 1114 * The pair of nodemasks 'to' and 'from' define the map. 
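	 *
	 * For example (illustrative): with from = {0,1} and to = {2,3},
	 * node_remap() pairs source node 0 with dest node 2 and source node 1
	 * with dest node 3, so memory moves 0 -> 2 and 1 -> 3.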
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need to move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise, when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However, if the number of source nodes is not equal
			 * to the number of destination nodes we cannot
			 * preserve this node-relative relationship. In that
			 * case, skip copying memory from a node that is in
			 * the destination mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *	    [0-7]   -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
			    (node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
1204 */ 1205 static struct folio *new_folio(struct folio *src, unsigned long start) 1206 { 1207 struct vm_area_struct *vma; 1208 unsigned long address; 1209 VMA_ITERATOR(vmi, current->mm, start); 1210 gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; 1211 1212 for_each_vma(vmi, vma) { 1213 address = page_address_in_vma(&src->page, vma); 1214 if (address != -EFAULT) 1215 break; 1216 } 1217 1218 if (folio_test_hugetlb(src)) { 1219 return alloc_hugetlb_folio_vma(folio_hstate(src), 1220 vma, address); 1221 } 1222 1223 if (folio_test_large(src)) 1224 gfp = GFP_TRANSHUGE; 1225 1226 /* 1227 * if !vma, vma_alloc_folio() will use task or system default policy 1228 */ 1229 return vma_alloc_folio(gfp, folio_order(src), vma, address, 1230 folio_test_large(src)); 1231 } 1232 #else 1233 1234 static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, 1235 unsigned long flags) 1236 { 1237 return -EIO; 1238 } 1239 1240 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1241 const nodemask_t *to, int flags) 1242 { 1243 return -ENOSYS; 1244 } 1245 1246 static struct folio *new_folio(struct folio *src, unsigned long start) 1247 { 1248 return NULL; 1249 } 1250 #endif 1251 1252 static long do_mbind(unsigned long start, unsigned long len, 1253 unsigned short mode, unsigned short mode_flags, 1254 nodemask_t *nmask, unsigned long flags) 1255 { 1256 struct mm_struct *mm = current->mm; 1257 struct vm_area_struct *vma, *prev; 1258 struct vma_iterator vmi; 1259 struct mempolicy *new; 1260 unsigned long end; 1261 int err; 1262 int ret; 1263 LIST_HEAD(pagelist); 1264 1265 if (flags & ~(unsigned long)MPOL_MF_VALID) 1266 return -EINVAL; 1267 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) 1268 return -EPERM; 1269 1270 if (start & ~PAGE_MASK) 1271 return -EINVAL; 1272 1273 if (mode == MPOL_DEFAULT) 1274 flags &= ~MPOL_MF_STRICT; 1275 1276 len = PAGE_ALIGN(len); 1277 end = start + len; 1278 1279 if (end < start) 1280 return -EINVAL; 1281 if (end == start) 1282 return 0; 1283 1284 new = mpol_new(mode, mode_flags, nmask); 1285 if (IS_ERR(new)) 1286 return PTR_ERR(new); 1287 1288 if (flags & MPOL_MF_LAZY) 1289 new->flags |= MPOL_F_MOF; 1290 1291 /* 1292 * If we are using the default policy then operation 1293 * on discontinuous address spaces is okay after all 1294 */ 1295 if (!new) 1296 flags |= MPOL_MF_DISCONTIG_OK; 1297 1298 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", 1299 start, start + len, mode, mode_flags, 1300 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); 1301 1302 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 1303 1304 lru_cache_disable(); 1305 } 1306 { 1307 NODEMASK_SCRATCH(scratch); 1308 if (scratch) { 1309 mmap_write_lock(mm); 1310 err = mpol_set_nodemask(new, nmask, scratch); 1311 if (err) 1312 mmap_write_unlock(mm); 1313 } else 1314 err = -ENOMEM; 1315 NODEMASK_SCRATCH_FREE(scratch); 1316 } 1317 if (err) 1318 goto mpol_out; 1319 1320 /* 1321 * Lock the VMAs before scanning for pages to migrate, to ensure we don't 1322 * miss a concurrently inserted page. 
1323 */ 1324 vma_iter_init(&vmi, mm, start); 1325 for_each_vma_range(vmi, vma, end) 1326 vma_start_write(vma); 1327 1328 ret = queue_pages_range(mm, start, end, nmask, 1329 flags | MPOL_MF_INVERT, &pagelist); 1330 1331 if (ret < 0) { 1332 err = ret; 1333 goto up_out; 1334 } 1335 1336 vma_iter_init(&vmi, mm, start); 1337 prev = vma_prev(&vmi); 1338 for_each_vma_range(vmi, vma, end) { 1339 err = mbind_range(&vmi, vma, &prev, start, end, new); 1340 if (err) 1341 break; 1342 } 1343 1344 if (!err) { 1345 int nr_failed = 0; 1346 1347 if (!list_empty(&pagelist)) { 1348 WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1349 nr_failed = migrate_pages(&pagelist, new_folio, NULL, 1350 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); 1351 if (nr_failed) 1352 putback_movable_pages(&pagelist); 1353 } 1354 1355 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 1356 err = -EIO; 1357 } else { 1358 up_out: 1359 if (!list_empty(&pagelist)) 1360 putback_movable_pages(&pagelist); 1361 } 1362 1363 mmap_write_unlock(mm); 1364 mpol_out: 1365 mpol_put(new); 1366 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 1367 lru_cache_enable(); 1368 return err; 1369 } 1370 1371 /* 1372 * User space interface with variable sized bitmaps for nodelists. 1373 */ 1374 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, 1375 unsigned long maxnode) 1376 { 1377 unsigned long nlongs = BITS_TO_LONGS(maxnode); 1378 int ret; 1379 1380 if (in_compat_syscall()) 1381 ret = compat_get_bitmap(mask, 1382 (const compat_ulong_t __user *)nmask, 1383 maxnode); 1384 else 1385 ret = copy_from_user(mask, nmask, 1386 nlongs * sizeof(unsigned long)); 1387 1388 if (ret) 1389 return -EFAULT; 1390 1391 if (maxnode % BITS_PER_LONG) 1392 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; 1393 1394 return 0; 1395 } 1396 1397 /* Copy a node mask from user space. */ 1398 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, 1399 unsigned long maxnode) 1400 { 1401 --maxnode; 1402 nodes_clear(*nodes); 1403 if (maxnode == 0 || !nmask) 1404 return 0; 1405 if (maxnode > PAGE_SIZE*BITS_PER_BYTE) 1406 return -EINVAL; 1407 1408 /* 1409 * When the user specified more nodes than supported just check 1410 * if the non supported part is all zero, one word at a time, 1411 * starting at the end. 
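	 *
	 * For example (illustrative): with MAX_NUMNODES of 1024 and a
	 * userspace maxnode of 4096, every word covering the unsupported
	 * high bits must be zero, otherwise -EINVAL is returned.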
1412 */ 1413 while (maxnode > MAX_NUMNODES) { 1414 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); 1415 unsigned long t; 1416 1417 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) 1418 return -EFAULT; 1419 1420 if (maxnode - bits >= MAX_NUMNODES) { 1421 maxnode -= bits; 1422 } else { 1423 maxnode = MAX_NUMNODES; 1424 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); 1425 } 1426 if (t) 1427 return -EINVAL; 1428 } 1429 1430 return get_bitmap(nodes_addr(*nodes), nmask, maxnode); 1431 } 1432 1433 /* Copy a kernel node mask to user space */ 1434 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, 1435 nodemask_t *nodes) 1436 { 1437 unsigned long copy = ALIGN(maxnode-1, 64) / 8; 1438 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); 1439 bool compat = in_compat_syscall(); 1440 1441 if (compat) 1442 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); 1443 1444 if (copy > nbytes) { 1445 if (copy > PAGE_SIZE) 1446 return -EINVAL; 1447 if (clear_user((char __user *)mask + nbytes, copy - nbytes)) 1448 return -EFAULT; 1449 copy = nbytes; 1450 maxnode = nr_node_ids; 1451 } 1452 1453 if (compat) 1454 return compat_put_bitmap((compat_ulong_t __user *)mask, 1455 nodes_addr(*nodes), maxnode); 1456 1457 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; 1458 } 1459 1460 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ 1461 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) 1462 { 1463 *flags = *mode & MPOL_MODE_FLAGS; 1464 *mode &= ~MPOL_MODE_FLAGS; 1465 1466 if ((unsigned int)(*mode) >= MPOL_MAX) 1467 return -EINVAL; 1468 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) 1469 return -EINVAL; 1470 if (*flags & MPOL_F_NUMA_BALANCING) { 1471 if (*mode != MPOL_BIND) 1472 return -EINVAL; 1473 *flags |= (MPOL_F_MOF | MPOL_F_MORON); 1474 } 1475 return 0; 1476 } 1477 1478 static long kernel_mbind(unsigned long start, unsigned long len, 1479 unsigned long mode, const unsigned long __user *nmask, 1480 unsigned long maxnode, unsigned int flags) 1481 { 1482 unsigned short mode_flags; 1483 nodemask_t nodes; 1484 int lmode = mode; 1485 int err; 1486 1487 start = untagged_addr(start); 1488 err = sanitize_mpol_flags(&lmode, &mode_flags); 1489 if (err) 1490 return err; 1491 1492 err = get_nodes(&nodes, nmask, maxnode); 1493 if (err) 1494 return err; 1495 1496 return do_mbind(start, len, lmode, mode_flags, &nodes, flags); 1497 } 1498 1499 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, 1500 unsigned long, home_node, unsigned long, flags) 1501 { 1502 struct mm_struct *mm = current->mm; 1503 struct vm_area_struct *vma, *prev; 1504 struct mempolicy *new, *old; 1505 unsigned long end; 1506 int err = -ENOENT; 1507 VMA_ITERATOR(vmi, mm, start); 1508 1509 start = untagged_addr(start); 1510 if (start & ~PAGE_MASK) 1511 return -EINVAL; 1512 /* 1513 * flags is used for future extension if any. 1514 */ 1515 if (flags != 0) 1516 return -EINVAL; 1517 1518 /* 1519 * Check home_node is online to avoid accessing uninitialized 1520 * NODE_DATA. 
1521 */ 1522 if (home_node >= MAX_NUMNODES || !node_online(home_node)) 1523 return -EINVAL; 1524 1525 len = PAGE_ALIGN(len); 1526 end = start + len; 1527 1528 if (end < start) 1529 return -EINVAL; 1530 if (end == start) 1531 return 0; 1532 mmap_write_lock(mm); 1533 prev = vma_prev(&vmi); 1534 for_each_vma_range(vmi, vma, end) { 1535 /* 1536 * If any vma in the range got policy other than MPOL_BIND 1537 * or MPOL_PREFERRED_MANY we return error. We don't reset 1538 * the home node for vmas we already updated before. 1539 */ 1540 old = vma_policy(vma); 1541 if (!old) 1542 continue; 1543 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { 1544 err = -EOPNOTSUPP; 1545 break; 1546 } 1547 new = mpol_dup(old); 1548 if (IS_ERR(new)) { 1549 err = PTR_ERR(new); 1550 break; 1551 } 1552 1553 vma_start_write(vma); 1554 new->home_node = home_node; 1555 err = mbind_range(&vmi, vma, &prev, start, end, new); 1556 mpol_put(new); 1557 if (err) 1558 break; 1559 } 1560 mmap_write_unlock(mm); 1561 return err; 1562 } 1563 1564 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, 1565 unsigned long, mode, const unsigned long __user *, nmask, 1566 unsigned long, maxnode, unsigned int, flags) 1567 { 1568 return kernel_mbind(start, len, mode, nmask, maxnode, flags); 1569 } 1570 1571 /* Set the process memory policy */ 1572 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, 1573 unsigned long maxnode) 1574 { 1575 unsigned short mode_flags; 1576 nodemask_t nodes; 1577 int lmode = mode; 1578 int err; 1579 1580 err = sanitize_mpol_flags(&lmode, &mode_flags); 1581 if (err) 1582 return err; 1583 1584 err = get_nodes(&nodes, nmask, maxnode); 1585 if (err) 1586 return err; 1587 1588 return do_set_mempolicy(lmode, mode_flags, &nodes); 1589 } 1590 1591 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, 1592 unsigned long, maxnode) 1593 { 1594 return kernel_set_mempolicy(mode, nmask, maxnode); 1595 } 1596 1597 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, 1598 const unsigned long __user *old_nodes, 1599 const unsigned long __user *new_nodes) 1600 { 1601 struct mm_struct *mm = NULL; 1602 struct task_struct *task; 1603 nodemask_t task_nodes; 1604 int err; 1605 nodemask_t *old; 1606 nodemask_t *new; 1607 NODEMASK_SCRATCH(scratch); 1608 1609 if (!scratch) 1610 return -ENOMEM; 1611 1612 old = &scratch->mask1; 1613 new = &scratch->mask2; 1614 1615 err = get_nodes(old, old_nodes, maxnode); 1616 if (err) 1617 goto out; 1618 1619 err = get_nodes(new, new_nodes, maxnode); 1620 if (err) 1621 goto out; 1622 1623 /* Find the mm_struct */ 1624 rcu_read_lock(); 1625 task = pid ? find_task_by_vpid(pid) : current; 1626 if (!task) { 1627 rcu_read_unlock(); 1628 err = -ESRCH; 1629 goto out; 1630 } 1631 get_task_struct(task); 1632 1633 err = -EINVAL; 1634 1635 /* 1636 * Check if this process has the right to modify the specified process. 1637 * Use the regular "ptrace_may_access()" checks. 1638 */ 1639 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { 1640 rcu_read_unlock(); 1641 err = -EPERM; 1642 goto out_put; 1643 } 1644 rcu_read_unlock(); 1645 1646 task_nodes = cpuset_mems_allowed(task); 1647 /* Is the user allowed to access the target nodes? 
*/ 1648 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { 1649 err = -EPERM; 1650 goto out_put; 1651 } 1652 1653 task_nodes = cpuset_mems_allowed(current); 1654 nodes_and(*new, *new, task_nodes); 1655 if (nodes_empty(*new)) 1656 goto out_put; 1657 1658 err = security_task_movememory(task); 1659 if (err) 1660 goto out_put; 1661 1662 mm = get_task_mm(task); 1663 put_task_struct(task); 1664 1665 if (!mm) { 1666 err = -EINVAL; 1667 goto out; 1668 } 1669 1670 err = do_migrate_pages(mm, old, new, 1671 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); 1672 1673 mmput(mm); 1674 out: 1675 NODEMASK_SCRATCH_FREE(scratch); 1676 1677 return err; 1678 1679 out_put: 1680 put_task_struct(task); 1681 goto out; 1682 1683 } 1684 1685 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, 1686 const unsigned long __user *, old_nodes, 1687 const unsigned long __user *, new_nodes) 1688 { 1689 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); 1690 } 1691 1692 1693 /* Retrieve NUMA policy */ 1694 static int kernel_get_mempolicy(int __user *policy, 1695 unsigned long __user *nmask, 1696 unsigned long maxnode, 1697 unsigned long addr, 1698 unsigned long flags) 1699 { 1700 int err; 1701 int pval; 1702 nodemask_t nodes; 1703 1704 if (nmask != NULL && maxnode < nr_node_ids) 1705 return -EINVAL; 1706 1707 addr = untagged_addr(addr); 1708 1709 err = do_get_mempolicy(&pval, &nodes, addr, flags); 1710 1711 if (err) 1712 return err; 1713 1714 if (policy && put_user(pval, policy)) 1715 return -EFAULT; 1716 1717 if (nmask) 1718 err = copy_nodes_to_user(nmask, maxnode, &nodes); 1719 1720 return err; 1721 } 1722 1723 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, 1724 unsigned long __user *, nmask, unsigned long, maxnode, 1725 unsigned long, addr, unsigned long, flags) 1726 { 1727 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); 1728 } 1729 1730 bool vma_migratable(struct vm_area_struct *vma) 1731 { 1732 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1733 return false; 1734 1735 /* 1736 * DAX device mappings require predictable access latency, so avoid 1737 * incurring periodic faults. 1738 */ 1739 if (vma_is_dax(vma)) 1740 return false; 1741 1742 if (is_vm_hugetlb_page(vma) && 1743 !hugepage_migration_supported(hstate_vma(vma))) 1744 return false; 1745 1746 /* 1747 * Migration allocates pages in the highest zone. If we cannot 1748 * do so then migration (at least from node to node) is not 1749 * possible. 1750 */ 1751 if (vma->vm_file && 1752 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) 1753 < policy_zone) 1754 return false; 1755 return true; 1756 } 1757 1758 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 1759 unsigned long addr) 1760 { 1761 struct mempolicy *pol = NULL; 1762 1763 if (vma) { 1764 if (vma->vm_ops && vma->vm_ops->get_policy) { 1765 pol = vma->vm_ops->get_policy(vma, addr); 1766 } else if (vma->vm_policy) { 1767 pol = vma->vm_policy; 1768 1769 /* 1770 * shmem_alloc_page() passes MPOL_F_SHARED policy with 1771 * a pseudo vma whose vma->vm_ops=NULL. Take a reference 1772 * count on these policies which will be dropped by 1773 * mpol_cond_put() later 1774 */ 1775 if (mpol_needs_cond_ref(pol)) 1776 mpol_get(pol); 1777 } 1778 } 1779 1780 return pol; 1781 } 1782 1783 /* 1784 * get_vma_policy(@vma, @addr) 1785 * @vma: virtual memory area whose policy is sought 1786 * @addr: address in @vma for shared policy lookup 1787 * 1788 * Returns effective policy for a VMA at specified address. 
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * If policy->nodes has movable memory only, we apply the policy
	 * only when gfp_zone(gfp) is ZONE_MOVABLE.
	 *
	 * policy->nodes is intersected with node_states[N_MEMORY], so if
	 * the following test fails, it implies that policy->nodes has
	 * movable memory only.
	 */
	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation.
 */
nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	int mode = policy->mode;

	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(mode == MPOL_BIND) &&
	    apply_policy_zone(policy, gfp_zone(gfp)) &&
	    cpuset_nodemask_valid_mems_allowed(&policy->nodes))
		return &policy->nodes;

	if (mode == MPOL_PREFERRED_MANY)
		return &policy->nodes;

	return NULL;
}

/*
 * Return the preferred node id for 'prefer' mempolicy, and return
 * the given id for all other policies.
 *
 * policy_node() is always coupled with policy_nodemask(), which
 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
 */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED) {
		nd = first_node(policy->nodes);
	} else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
1884 */ 1885 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); 1886 } 1887 1888 if ((policy->mode == MPOL_BIND || 1889 policy->mode == MPOL_PREFERRED_MANY) && 1890 policy->home_node != NUMA_NO_NODE) 1891 return policy->home_node; 1892 1893 return nd; 1894 } 1895 1896 /* Do dynamic interleaving for a process */ 1897 static unsigned interleave_nodes(struct mempolicy *policy) 1898 { 1899 unsigned next; 1900 struct task_struct *me = current; 1901 1902 next = next_node_in(me->il_prev, policy->nodes); 1903 if (next < MAX_NUMNODES) 1904 me->il_prev = next; 1905 return next; 1906 } 1907 1908 /* 1909 * Depending on the memory policy provide a node from which to allocate the 1910 * next slab entry. 1911 */ 1912 unsigned int mempolicy_slab_node(void) 1913 { 1914 struct mempolicy *policy; 1915 int node = numa_mem_id(); 1916 1917 if (!in_task()) 1918 return node; 1919 1920 policy = current->mempolicy; 1921 if (!policy) 1922 return node; 1923 1924 switch (policy->mode) { 1925 case MPOL_PREFERRED: 1926 return first_node(policy->nodes); 1927 1928 case MPOL_INTERLEAVE: 1929 return interleave_nodes(policy); 1930 1931 case MPOL_BIND: 1932 case MPOL_PREFERRED_MANY: 1933 { 1934 struct zoneref *z; 1935 1936 /* 1937 * Follow bind policy behavior and start allocation at the 1938 * first node. 1939 */ 1940 struct zonelist *zonelist; 1941 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); 1942 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; 1943 z = first_zones_zonelist(zonelist, highest_zoneidx, 1944 &policy->nodes); 1945 return z->zone ? zone_to_nid(z->zone) : node; 1946 } 1947 case MPOL_LOCAL: 1948 return node; 1949 1950 default: 1951 BUG(); 1952 } 1953 } 1954 1955 /* 1956 * Do static interleaving for a VMA with known offset @n. Returns the n'th 1957 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the 1958 * number of present nodes. 1959 */ 1960 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) 1961 { 1962 nodemask_t nodemask = pol->nodes; 1963 unsigned int target, nnodes; 1964 int i; 1965 int nid; 1966 /* 1967 * The barrier will stabilize the nodemask in a register or on 1968 * the stack so that it will stop changing under the code. 1969 * 1970 * Between first_node() and next_node(), pol->nodes could be changed 1971 * by other threads. So we put pol->nodes in a local stack. 1972 */ 1973 barrier(); 1974 1975 nnodes = nodes_weight(nodemask); 1976 if (!nnodes) 1977 return numa_node_id(); 1978 target = (unsigned int)n % nnodes; 1979 nid = first_node(nodemask); 1980 for (i = 0; i < target; i++) 1981 nid = next_node(nid, nodemask); 1982 return nid; 1983 } 1984 1985 /* Determine a node number for interleave */ 1986 static inline unsigned interleave_nid(struct mempolicy *pol, 1987 struct vm_area_struct *vma, unsigned long addr, int shift) 1988 { 1989 if (vma) { 1990 unsigned long off; 1991 1992 /* 1993 * for small pages, there is no difference between 1994 * shift and PAGE_SHIFT, so the bit-shift is safe. 1995 * for huge pages, since vm_pgoff is in units of small 1996 * pages, we need to shift off the always 0 bits to get 1997 * a useful offset. 
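		 *
		 * For example (illustrative, assuming 4 KiB base pages): for
		 * a 2 MiB hugetlb mapping shift is 21, so the interleave
		 * index is (vm_pgoff >> 9) + ((addr - vm_start) >> 21),
		 * which offset_il_node() then reduces modulo the number of
		 * nodes in the interleave mask.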
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
 * to the mempolicy's @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin().
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;
	int mode;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;
	mode = (*mpol)->mode;

	if (unlikely(mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
			*nodemask = &(*mpol)->nodes;
	}
	return nid;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*mask = mempolicy->nodes;
		break;

	case MPOL_LOCAL:
		init_nodemask_of_node(mask, numa_node_id());
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_in_oom_domain
 *
 * If tsk's mempolicy is "bind", check for intersection between mask and
 * the policy nodemask. Otherwise, return true for all other policies
 * including "interleave", as a tsk with "interleave" policy may have
 * memory allocated from all nodes in the system.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2099 */ 2100 bool mempolicy_in_oom_domain(struct task_struct *tsk, 2101 const nodemask_t *mask) 2102 { 2103 struct mempolicy *mempolicy; 2104 bool ret = true; 2105 2106 if (!mask) 2107 return ret; 2108 2109 task_lock(tsk); 2110 mempolicy = tsk->mempolicy; 2111 if (mempolicy && mempolicy->mode == MPOL_BIND) 2112 ret = nodes_intersects(mempolicy->nodes, *mask); 2113 task_unlock(tsk); 2114 2115 return ret; 2116 } 2117 2118 /* Allocate a page in interleaved policy. 2119 Own path because it needs to do special accounting. */ 2120 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 2121 unsigned nid) 2122 { 2123 struct page *page; 2124 2125 page = __alloc_pages(gfp, order, nid, NULL); 2126 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ 2127 if (!static_branch_likely(&vm_numa_stat_key)) 2128 return page; 2129 if (page && page_to_nid(page) == nid) { 2130 preempt_disable(); 2131 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); 2132 preempt_enable(); 2133 } 2134 return page; 2135 } 2136 2137 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, 2138 int nid, struct mempolicy *pol) 2139 { 2140 struct page *page; 2141 gfp_t preferred_gfp; 2142 2143 /* 2144 * This is a two pass approach. The first pass will only try the 2145 * preferred nodes but skip the direct reclaim and allow the 2146 * allocation to fail, while the second pass will try all the 2147 * nodes in system. 2148 */ 2149 preferred_gfp = gfp | __GFP_NOWARN; 2150 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2151 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); 2152 if (!page) 2153 page = __alloc_pages(gfp, order, nid, NULL); 2154 2155 return page; 2156 } 2157 2158 /** 2159 * vma_alloc_folio - Allocate a folio for a VMA. 2160 * @gfp: GFP flags. 2161 * @order: Order of the folio. 2162 * @vma: Pointer to VMA or NULL if not available. 2163 * @addr: Virtual address of the allocation. Must be inside @vma. 2164 * @hugepage: For hugepages try only the preferred node if possible. 2165 * 2166 * Allocate a folio for a specific address in @vma, using the appropriate 2167 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock 2168 * of the mm_struct of the VMA to prevent it from going away. Should be 2169 * used for all allocations for folios that will be mapped into user space. 2170 * 2171 * Return: The folio on success or NULL if allocation fails. 
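 *
 * Minimal usage sketch (a hypothetical anonymous-fault caller; the
 * surrounding variables and error handling are assumptions, not code
 * from this file):
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;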
2172 */ 2173 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, 2174 unsigned long addr, bool hugepage) 2175 { 2176 struct mempolicy *pol; 2177 int node = numa_node_id(); 2178 struct folio *folio; 2179 int preferred_nid; 2180 nodemask_t *nmask; 2181 2182 pol = get_vma_policy(vma, addr); 2183 2184 if (pol->mode == MPOL_INTERLEAVE) { 2185 struct page *page; 2186 unsigned nid; 2187 2188 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); 2189 mpol_cond_put(pol); 2190 gfp |= __GFP_COMP; 2191 page = alloc_page_interleave(gfp, order, nid); 2192 if (page && order > 1) 2193 prep_transhuge_page(page); 2194 folio = (struct folio *)page; 2195 goto out; 2196 } 2197 2198 if (pol->mode == MPOL_PREFERRED_MANY) { 2199 struct page *page; 2200 2201 node = policy_node(gfp, pol, node); 2202 gfp |= __GFP_COMP; 2203 page = alloc_pages_preferred_many(gfp, order, node, pol); 2204 mpol_cond_put(pol); 2205 if (page && order > 1) 2206 prep_transhuge_page(page); 2207 folio = (struct folio *)page; 2208 goto out; 2209 } 2210 2211 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { 2212 int hpage_node = node; 2213 2214 /* 2215 * For hugepage allocation and non-interleave policy which 2216 * allows the current node (or other explicitly preferred 2217 * node) we only try to allocate from the current/preferred 2218 * node and don't fall back to other nodes, as the cost of 2219 * remote accesses would likely offset THP benefits. 2220 * 2221 * If the policy is interleave or does not allow the current 2222 * node in its nodemask, we allocate the standard way. 2223 */ 2224 if (pol->mode == MPOL_PREFERRED) 2225 hpage_node = first_node(pol->nodes); 2226 2227 nmask = policy_nodemask(gfp, pol); 2228 if (!nmask || node_isset(hpage_node, *nmask)) { 2229 mpol_cond_put(pol); 2230 /* 2231 * First, try to allocate THP only on local node, but 2232 * don't reclaim unnecessarily, just compact. 2233 */ 2234 folio = __folio_alloc_node(gfp | __GFP_THISNODE | 2235 __GFP_NORETRY, order, hpage_node); 2236 2237 /* 2238 * If hugepage allocations are configured to always 2239 * synchronous compact or the vma has been madvised 2240 * to prefer hugepage backing, retry allowing remote 2241 * memory with both reclaim and compact as well. 2242 */ 2243 if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) 2244 folio = __folio_alloc(gfp, order, hpage_node, 2245 nmask); 2246 2247 goto out; 2248 } 2249 } 2250 2251 nmask = policy_nodemask(gfp, pol); 2252 preferred_nid = policy_node(gfp, pol, node); 2253 folio = __folio_alloc(gfp, order, preferred_nid, nmask); 2254 mpol_cond_put(pol); 2255 out: 2256 return folio; 2257 } 2258 EXPORT_SYMBOL(vma_alloc_folio); 2259 2260 /** 2261 * alloc_pages - Allocate pages. 2262 * @gfp: GFP flags. 2263 * @order: Power of two of number of pages to allocate. 2264 * 2265 * Allocate 1 << @order contiguous pages. The physical address of the 2266 * first page is naturally aligned (eg an order-3 allocation will be aligned 2267 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current 2268 * process is honoured when in process context. 2269 * 2270 * Context: Can be called from any context, providing the appropriate GFP 2271 * flags are used. 2272 * Return: The page on success or NULL if allocation fails. 
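 *
 * Illustrative example (not taken from a real caller): an order-2 request
 * returns four physically contiguous pages, which must later be freed with
 * the same order:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);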
 */
struct page *alloc_pages(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else if (pol->mode == MPOL_PREFERRED_MANY)
		page = alloc_pages_preferred_many(gfp, order,
				policy_node(gfp, pol, numa_node_id()), pol);
	else
		page = __alloc_pages(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages);

struct folio *folio_alloc(gfp_t gfp, unsigned order)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);

	if (page && order > 1)
		prep_transhuge_page(page);
	return (struct folio *)page;
}
EXPORT_SYMBOL(folio_alloc);

static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	int nodes;
	unsigned long nr_pages_per_node;
	int delta;
	int i;
	unsigned long nr_allocated;
	unsigned long total_allocated = 0;

	nodes = nodes_weight(pol->nodes);
	nr_pages_per_node = nr_pages / nodes;
	delta = nr_pages - nodes * nr_pages_per_node;

	for (i = 0; i < nodes; i++) {
		if (delta) {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node + 1, NULL,
					page_array);
			delta--;
		} else {
			nr_allocated = __alloc_pages_bulk(gfp,
					interleave_nodes(pol), NULL,
					nr_pages_per_node, NULL, page_array);
		}

		page_array += nr_allocated;
		total_allocated += nr_allocated;
	}

	return total_allocated;
}

static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
		struct mempolicy *pol, unsigned long nr_pages,
		struct page **page_array)
{
	gfp_t preferred_gfp;
	unsigned long nr_allocated = 0;

	preferred_gfp = gfp | __GFP_NOWARN;
	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);

	nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
					  nr_pages, NULL, page_array);

	if (nr_allocated < nr_pages)
		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
				nr_pages - nr_allocated, NULL,
				page_array + nr_allocated);
	return nr_allocated;
}

/* Bulk page allocation and the mempolicy need to be considered at the
 * same time in some situations, such as vmalloc.
 *
 * Handling both in one call can speed up allocation considerably,
 * especially for the interleave policy.
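 *
 * Hypothetical caller sketch (the page array handling and the single-page
 * fallback are illustrative assumptions, not code from this file):
 *
 *	filled = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, want, pages);
 *	while (filled < want) {
 *		pages[filled] = alloc_pages(GFP_KERNEL, 0);
 *		if (!pages[filled])
 *			break;
 *		filled++;
 *	}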
 */
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
		unsigned long nr_pages, struct page **page_array)
{
	struct mempolicy *pol = &default_policy;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	if (pol->mode == MPOL_INTERLEAVE)
		return alloc_pages_bulk_array_interleave(gfp, pol,
							 nr_pages, page_array);

	if (pol->mode == MPOL_PREFERRED_MANY)
		return alloc_pages_bulk_array_preferred_many(gfp,
				numa_node_id(), pol, nr_pages, page_array);

	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
				  policy_nodemask(gfp, pol), nr_pages, NULL,
				  page_array);
}

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we don't need to do any rebind work for the current
 * task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (a->home_node != b->home_node)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		return !!nodes_equal(a->nodes, b->nodes);
	case MPOL_LOCAL:
		return true;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.
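 * For example (illustrative): with nodes covering [2, 5) and [7, 9) in the
 * tree, a lookup for [4, 8) returns the [2, 5) node, the first one that
 * intersects the requested range.
 *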
Caller holds sp->lock for 2477 * reading or for writing 2478 */ 2479 static struct sp_node * 2480 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) 2481 { 2482 struct rb_node *n = sp->root.rb_node; 2483 2484 while (n) { 2485 struct sp_node *p = rb_entry(n, struct sp_node, nd); 2486 2487 if (start >= p->end) 2488 n = n->rb_right; 2489 else if (end <= p->start) 2490 n = n->rb_left; 2491 else 2492 break; 2493 } 2494 if (!n) 2495 return NULL; 2496 for (;;) { 2497 struct sp_node *w = NULL; 2498 struct rb_node *prev = rb_prev(n); 2499 if (!prev) 2500 break; 2501 w = rb_entry(prev, struct sp_node, nd); 2502 if (w->end <= start) 2503 break; 2504 n = prev; 2505 } 2506 return rb_entry(n, struct sp_node, nd); 2507 } 2508 2509 /* 2510 * Insert a new shared policy into the list. Caller holds sp->lock for 2511 * writing. 2512 */ 2513 static void sp_insert(struct shared_policy *sp, struct sp_node *new) 2514 { 2515 struct rb_node **p = &sp->root.rb_node; 2516 struct rb_node *parent = NULL; 2517 struct sp_node *nd; 2518 2519 while (*p) { 2520 parent = *p; 2521 nd = rb_entry(parent, struct sp_node, nd); 2522 if (new->start < nd->start) 2523 p = &(*p)->rb_left; 2524 else if (new->end > nd->end) 2525 p = &(*p)->rb_right; 2526 else 2527 BUG(); 2528 } 2529 rb_link_node(&new->nd, parent, p); 2530 rb_insert_color(&new->nd, &sp->root); 2531 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, 2532 new->policy ? new->policy->mode : 0); 2533 } 2534 2535 /* Find shared policy intersecting idx */ 2536 struct mempolicy * 2537 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) 2538 { 2539 struct mempolicy *pol = NULL; 2540 struct sp_node *sn; 2541 2542 if (!sp->root.rb_node) 2543 return NULL; 2544 read_lock(&sp->lock); 2545 sn = sp_lookup(sp, idx, idx+1); 2546 if (sn) { 2547 mpol_get(sn->policy); 2548 pol = sn->policy; 2549 } 2550 read_unlock(&sp->lock); 2551 return pol; 2552 } 2553 2554 static void sp_free(struct sp_node *n) 2555 { 2556 mpol_put(n->policy); 2557 kmem_cache_free(sn_cache, n); 2558 } 2559 2560 /** 2561 * mpol_misplaced - check whether current page node is valid in policy 2562 * 2563 * @page: page to be checked 2564 * @vma: vm area where page mapped 2565 * @addr: virtual address where page mapped 2566 * 2567 * Lookup current policy node id for vma,addr and "compare to" page's 2568 * node id. Policy determination "mimics" alloc_page_vma(). 2569 * Called from fault path where we know the vma and faulting address. 2570 * 2571 * Return: NUMA_NO_NODE if the page is in a node that is valid for this 2572 * policy, or a suitable node ID to allocate a replacement page from. 
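 *
 * Hedged sketch of a caller, patterned on a NUMA hinting fault handler
 * (the surrounding variables are assumptions for illustration):
 *
 *	target_nid = mpol_misplaced(page, vma, fault_addr);
 *	if (target_nid != NUMA_NO_NODE)
 *		migrate_misplaced_page(page, vma, target_nid);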
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = NUMA_NO_NODE;
	int ret = NUMA_NO_NODE;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (node_isset(curnid, pol->nodes))
			goto out;
		polnid = first_node(pol->nodes);
		break;

	case MPOL_LOCAL:
		polnid = numa_node_id();
		break;

	case MPOL_BIND:
		/* Optimize placement among multiple nodes via NUMA balancing */
		if (pol->flags & MPOL_F_MORON) {
			if (node_isset(thisnid, pol->nodes))
				break;
			goto out;
		}
		fallthrough;

	case MPOL_PREFERRED_MANY:
		/*
		 * use current page if in policy nodemask,
		 * else select nearest allowed node, if any.
		 * If no allowed nodes, use current [!misplaced].
		 */
		if (node_isset(curnid, pol->nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->nodes);
		polnid = zone_to_nid(z->zone);
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}

/*
 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range.
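 *
 * Worked example (illustrative): if an existing node covers [0, 10) and the
 * new range is [3, 6), the old node is clipped to [0, 3), a copy of its
 * policy is inserted for [6, 10), and the new node is inserted for [3, 6).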
*/ 2704 static int shared_policy_replace(struct shared_policy *sp, unsigned long start, 2705 unsigned long end, struct sp_node *new) 2706 { 2707 struct sp_node *n; 2708 struct sp_node *n_new = NULL; 2709 struct mempolicy *mpol_new = NULL; 2710 int ret = 0; 2711 2712 restart: 2713 write_lock(&sp->lock); 2714 n = sp_lookup(sp, start, end); 2715 /* Take care of old policies in the same range. */ 2716 while (n && n->start < end) { 2717 struct rb_node *next = rb_next(&n->nd); 2718 if (n->start >= start) { 2719 if (n->end <= end) 2720 sp_delete(sp, n); 2721 else 2722 n->start = end; 2723 } else { 2724 /* Old policy spanning whole new range. */ 2725 if (n->end > end) { 2726 if (!n_new) 2727 goto alloc_new; 2728 2729 *mpol_new = *n->policy; 2730 atomic_set(&mpol_new->refcnt, 1); 2731 sp_node_init(n_new, end, n->end, mpol_new); 2732 n->end = start; 2733 sp_insert(sp, n_new); 2734 n_new = NULL; 2735 mpol_new = NULL; 2736 break; 2737 } else 2738 n->end = start; 2739 } 2740 if (!next) 2741 break; 2742 n = rb_entry(next, struct sp_node, nd); 2743 } 2744 if (new) 2745 sp_insert(sp, new); 2746 write_unlock(&sp->lock); 2747 ret = 0; 2748 2749 err_out: 2750 if (mpol_new) 2751 mpol_put(mpol_new); 2752 if (n_new) 2753 kmem_cache_free(sn_cache, n_new); 2754 2755 return ret; 2756 2757 alloc_new: 2758 write_unlock(&sp->lock); 2759 ret = -ENOMEM; 2760 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); 2761 if (!n_new) 2762 goto err_out; 2763 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); 2764 if (!mpol_new) 2765 goto err_out; 2766 atomic_set(&mpol_new->refcnt, 1); 2767 goto restart; 2768 } 2769 2770 /** 2771 * mpol_shared_policy_init - initialize shared policy for inode 2772 * @sp: pointer to inode shared policy 2773 * @mpol: struct mempolicy to install 2774 * 2775 * Install non-NULL @mpol in inode's shared policy rb-tree. 2776 * On entry, the current task has a reference on a non-NULL @mpol. 2777 * This must be released on exit. 2778 * This is called at get_inode() calls and we can use GFP_KERNEL. 2779 */ 2780 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) 2781 { 2782 int ret; 2783 2784 sp->root = RB_ROOT; /* empty tree == default mempolicy */ 2785 rwlock_init(&sp->lock); 2786 2787 if (mpol) { 2788 struct vm_area_struct pvma; 2789 struct mempolicy *new; 2790 NODEMASK_SCRATCH(scratch); 2791 2792 if (!scratch) 2793 goto put_mpol; 2794 /* contextualize the tmpfs mount point mempolicy */ 2795 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2796 if (IS_ERR(new)) 2797 goto free_scratch; /* no valid nodemask intersection */ 2798 2799 task_lock(current); 2800 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 2801 task_unlock(current); 2802 if (ret) 2803 goto put_new; 2804 2805 /* Create pseudo-vma that contains just the policy */ 2806 vma_init(&pvma, NULL); 2807 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2808 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2809 2810 put_new: 2811 mpol_put(new); /* drop initial ref */ 2812 free_scratch: 2813 NODEMASK_SCRATCH_FREE(scratch); 2814 put_mpol: 2815 mpol_put(mpol); /* drop our incoming ref on sb mpol */ 2816 } 2817 } 2818 2819 int mpol_set_shared_policy(struct shared_policy *info, 2820 struct vm_area_struct *vma, struct mempolicy *npol) 2821 { 2822 int err; 2823 struct sp_node *new = NULL; 2824 unsigned long sz = vma_pages(vma); 2825 2826 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", 2827 vma->vm_pgoff, 2828 sz, npol ? npol->mode : -1, 2829 npol ? npol->flags : -1, 2830 npol ? 
nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); 2831 2832 if (npol) { 2833 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); 2834 if (!new) 2835 return -ENOMEM; 2836 } 2837 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); 2838 if (err && new) 2839 sp_free(new); 2840 return err; 2841 } 2842 2843 /* Free a backing policy store on inode delete. */ 2844 void mpol_free_shared_policy(struct shared_policy *p) 2845 { 2846 struct sp_node *n; 2847 struct rb_node *next; 2848 2849 if (!p->root.rb_node) 2850 return; 2851 write_lock(&p->lock); 2852 next = rb_first(&p->root); 2853 while (next) { 2854 n = rb_entry(next, struct sp_node, nd); 2855 next = rb_next(&n->nd); 2856 sp_delete(p, n); 2857 } 2858 write_unlock(&p->lock); 2859 } 2860 2861 #ifdef CONFIG_NUMA_BALANCING 2862 static int __initdata numabalancing_override; 2863 2864 static void __init check_numabalancing_enable(void) 2865 { 2866 bool numabalancing_default = false; 2867 2868 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) 2869 numabalancing_default = true; 2870 2871 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ 2872 if (numabalancing_override) 2873 set_numabalancing_state(numabalancing_override == 1); 2874 2875 if (num_online_nodes() > 1 && !numabalancing_override) { 2876 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", 2877 numabalancing_default ? "Enabling" : "Disabling"); 2878 set_numabalancing_state(numabalancing_default); 2879 } 2880 } 2881 2882 static int __init setup_numabalancing(char *str) 2883 { 2884 int ret = 0; 2885 if (!str) 2886 goto out; 2887 2888 if (!strcmp(str, "enable")) { 2889 numabalancing_override = 1; 2890 ret = 1; 2891 } else if (!strcmp(str, "disable")) { 2892 numabalancing_override = -1; 2893 ret = 1; 2894 } 2895 out: 2896 if (!ret) 2897 pr_warn("Unable to parse numa_balancing=\n"); 2898 2899 return ret; 2900 } 2901 __setup("numa_balancing=", setup_numabalancing); 2902 #else 2903 static inline void __init check_numabalancing_enable(void) 2904 { 2905 } 2906 #endif /* CONFIG_NUMA_BALANCING */ 2907 2908 /* assumes fs == KERNEL_DS */ 2909 void __init numa_policy_init(void) 2910 { 2911 nodemask_t interleave_nodes; 2912 unsigned long largest = 0; 2913 int nid, prefer = 0; 2914 2915 policy_cache = kmem_cache_create("numa_policy", 2916 sizeof(struct mempolicy), 2917 0, SLAB_PANIC, NULL); 2918 2919 sn_cache = kmem_cache_create("shared_policy_node", 2920 sizeof(struct sp_node), 2921 0, SLAB_PANIC, NULL); 2922 2923 for_each_node(nid) { 2924 preferred_node_policy[nid] = (struct mempolicy) { 2925 .refcnt = ATOMIC_INIT(1), 2926 .mode = MPOL_PREFERRED, 2927 .flags = MPOL_F_MOF | MPOL_F_MORON, 2928 .nodes = nodemask_of_node(nid), 2929 }; 2930 } 2931 2932 /* 2933 * Set interleaving policy for system init. Interleaving is only 2934 * enabled across suitably sized nodes (default is >= 16MB), or 2935 * fall back to the largest node if they're all smaller. 2936 */ 2937 nodes_clear(interleave_nodes); 2938 for_each_node_state(nid, N_MEMORY) { 2939 unsigned long total_pages = node_present_pages(nid); 2940 2941 /* Preserve the largest node */ 2942 if (largest < total_pages) { 2943 largest = total_pages; 2944 prefer = nid; 2945 } 2946 2947 /* Interleave this node? 
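		 * Only if it has at least 16MB of present memory: with 4K
		 * pages that is 4096 pages, since (4096 << 12) == (16 << 20).
		 * (Worked arithmetic for illustration; the authoritative
		 * threshold is the check below.)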
		 */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
	[MPOL_PREFERRED_MANY]  = "prefer (many)",
};


#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * Return: %0 on success, else %1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1, mode;

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	mode = match_string(policy_modes, MPOL_MAX, str);
	if (mode < 0)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only, although later
		 * we use first_node(nodes) to grab a single node, so here
		 * nodelist (or nodes) cannot be empty.
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
			if (nodes_empty(nodes))
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3088 */ 3089 if (mode != MPOL_PREFERRED) { 3090 new->nodes = nodes; 3091 } else if (nodelist) { 3092 nodes_clear(new->nodes); 3093 node_set(first_node(nodes), new->nodes); 3094 } else { 3095 new->mode = MPOL_LOCAL; 3096 } 3097 3098 /* 3099 * Save nodes for contextualization: this will be used to "clone" 3100 * the mempolicy in a specific context [cpuset] at a later time. 3101 */ 3102 new->w.user_nodemask = nodes; 3103 3104 err = 0; 3105 3106 out: 3107 /* Restore string for error message */ 3108 if (nodelist) 3109 *--nodelist = ':'; 3110 if (flags) 3111 *--flags = '='; 3112 if (!err) 3113 *mpol = new; 3114 return err; 3115 } 3116 #endif /* CONFIG_TMPFS */ 3117 3118 /** 3119 * mpol_to_str - format a mempolicy structure for printing 3120 * @buffer: to contain formatted mempolicy string 3121 * @maxlen: length of @buffer 3122 * @pol: pointer to mempolicy to be formatted 3123 * 3124 * Convert @pol into a string. If @buffer is too short, truncate the string. 3125 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the 3126 * longest flag, "relative", and to display at least a few node ids. 3127 */ 3128 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) 3129 { 3130 char *p = buffer; 3131 nodemask_t nodes = NODE_MASK_NONE; 3132 unsigned short mode = MPOL_DEFAULT; 3133 unsigned short flags = 0; 3134 3135 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { 3136 mode = pol->mode; 3137 flags = pol->flags; 3138 } 3139 3140 switch (mode) { 3141 case MPOL_DEFAULT: 3142 case MPOL_LOCAL: 3143 break; 3144 case MPOL_PREFERRED: 3145 case MPOL_PREFERRED_MANY: 3146 case MPOL_BIND: 3147 case MPOL_INTERLEAVE: 3148 nodes = pol->nodes; 3149 break; 3150 default: 3151 WARN_ON_ONCE(1); 3152 snprintf(p, maxlen, "unknown"); 3153 return; 3154 } 3155 3156 p += snprintf(p, maxlen, "%s", policy_modes[mode]); 3157 3158 if (flags & MPOL_MODE_FLAGS) { 3159 p += snprintf(p, buffer + maxlen - p, "="); 3160 3161 /* 3162 * Currently, the only defined flags are mutually exclusive 3163 */ 3164 if (flags & MPOL_F_STATIC_NODES) 3165 p += snprintf(p, buffer + maxlen - p, "static"); 3166 else if (flags & MPOL_F_RELATIVE_NODES) 3167 p += snprintf(p, buffer + maxlen - p, "relative"); 3168 } 3169 3170 if (!nodes_empty(nodes)) 3171 p += scnprintf(p, buffer + maxlen - p, ":%*pbl", 3172 nodemask_pr_args(&nodes)); 3173 } 3174
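
/*
 * Hedged examples of the string syntax handled by mpol_parse_str() and
 * reproduced by mpol_to_str() (node numbers are illustrative only):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED with MPOL_F_STATIC_NODES, node 1
 *	"bind:0,2"		MPOL_BIND over nodes 0 and 2
 *	"local"			MPOL_LOCAL, no nodelist allowed
 *
 * mpol_to_str() emits the same syntax, e.g. "bind:0,2", when tmpfs mount
 * options are shown in /proc/mounts.
 */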