// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Supports four policies, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * preferred many	Try a set of nodes first before normal fallback.
 *		This is similar to preferred without the special case.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
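
/*
 * Editorial aside, not kernel code: a minimal userspace sketch of how the
 * policies documented above are usually exercised through the raw
 * syscalls. This is a hedged, illustrative example (constants come from
 * <numaif.h>; the mapping length "len" is an arbitrary choice and error
 * handling is elided), not part of this file:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	size_t len = 1 << 20;				// 1 MB, arbitrary
 *
 *	// Interleave this task's future allocations across nodes 0 and 1:
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// Bind one specific mapping to node 0 only:
 *	unsigned long node0 = 1UL << 0;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 */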

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
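
/*
 * Worked example for numa_map_to_online_node(), with assumed distances
 * (illustrative only): if node 2 is offline and node_distance(2, 0) == 31
 * while node_distance(2, 1) == 21, the scan above returns node 1, the
 * closest online candidate. NUMA_NO_NODE and already-online ids pass
 * straight through.
 */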

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;

	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) resp. local memory policies are not a
	 * subject of any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}
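
/*
 * Worked example for the MPOL_F_RELATIVE_NODES remap performed by
 * mpol_relative_nodemask() (illustrative values, assuming the usual
 * nodes_fold()/nodes_onto() semantics): with a user mask of {0,2} and a
 * cpuset of {4,5,6}, nodes_fold() wraps the user mask modulo
 * nodes_weight(rel) == 3, leaving {0,2}, and nodes_onto() then maps bit n
 * onto the n'th set bit of the cpuset, yielding {4,6}. The policy thus
 * follows the task when its cpuset is later moved.
 */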

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);

	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}
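
/*
 * Rebind behaviour at a glance (hedged, illustrative example): assume a
 * cpuset move from mems {0,1} to {2,3} with a policy nodemask of {1}.
 * With MPOL_F_STATIC_NODES the literal user mask is kept, {1} & {2,3} is
 * empty, so the code above falls back to the full new mems {2,3}. With
 * MPOL_F_RELATIVE_NODES, position 1 is re-applied onto the new mems,
 * giving {3}. With neither flag, nodes_remap() slides node 1, the second
 * old node, onto 3 as well, and w.cpuset_mems_allowed keeps tracking
 * future cpuset changes.
 */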

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the inverse of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or a
 *     special page was met, i.e. the huge zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - a migration entry was met, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages, checking if they satisfy the required conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or a
 *     special page was met, i.e. the zero page.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off-LRU pages in the range. We still
			 * need to migrate the other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means we only detect misplaced pages and
		 * there is no need to check any further vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but still allow migrating
		 * pages which have already been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we try to migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate the page, but still allow
			 * migrating pages which have already been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * We need to check MPOL_MF_STRICT so that we can return -EIO when
	 * needed, regardless of vma_migratable().
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - pages queued successfully, or no misplaced page was found.
 * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or
 *         the memory range specified by nodemask and maxnode points
 *         outside your accessible address space (-EFAULT).
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA.
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
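
/*
 * Illustrative userspace view of the MPOL_F_NUMA_BALANCING path above
 * (hedged sketch; constants from <numaif.h>, error handling elided). The
 * flag is OR-ed into the mode argument of set_mempolicy() and is only
 * accepted together with MPOL_BIND; it enables NUMA-balancing driven
 * migration within the bound nodes:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_BIND | MPOL_F_NUMA_BALANCING,
 *		      &mask, 8 * sizeof(mask));
 */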

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;
	int locked = 1;

	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}
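
/*
 * Illustrative userspace counterpart of do_get_mempolicy() (hedged
 * sketch; constants from <numaif.h>, error handling elided). Combining
 * MPOL_F_NODE with MPOL_F_ADDR asks for the node that the page backing a
 * given address currently resides on, a common way to verify placement:
 *
 *	int node;
 *
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// "node" now holds the NUMA node id of the page at "addr"
 */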

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);

	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * A non-movable page may reach here, and there may
			 * be temporary off-LRU pages or non-LRU movable
			 * pages. Treat them as unmovable pages since they
			 * can't be isolated, so they can't be moved at the
			 * moment. We should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration. Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However, if the number of source nodes is not equal
			 * to the number of destination nodes, we can not
			 * preserve this node relative relationship. In that
			 * case, skip copying memory from a node that is in
			 * the destination mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *	    [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration, based on the vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long address;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif
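
/*
 * Illustrative userspace view of the mbind() operation implemented below
 * (hedged sketch; constants from <numaif.h>, "buf"/"len" name an existing
 * mapping and are hypothetical, error handling elided). MPOL_MF_MOVE asks
 * the kernel to also migrate already-faulted pages that violate the new
 * policy, and MPOL_MF_STRICT turns any page left misplaced into -EIO:
 *
 *	unsigned long node1 = 1UL << 1;
 *
 *	mbind(buf, len, MPOL_BIND, &node1, 8 * sizeof(node1),
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */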

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		lru_cache_disable();
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported, just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode covers more longs than MAX_NUMNODES, check the bits
	 * in that extra area first, and then go through to check the
	 * remaining bits which are equal to or bigger than MAX_NUMNODES.
	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
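
/*
 * Worked example of the bounds arithmetic above (illustrative, 64-bit
 * longs assumed): a caller passing maxnode == 65 means "bits 0..63", so
 * after --maxnode we have maxnode == 64, nlongs == 1 and endmask == ~0UL;
 * with maxnode == 17 we get maxnode == 16, nlongs == 1 and
 * endmask == (1UL << 16) - 1, i.e. only the low 16 bits of the single
 * user long may be set.
 */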

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
{
	*flags = *mode & MPOL_MODE_FLAGS;
	*mode &= ~MPOL_MODE_FLAGS;

	if ((unsigned int)(*mode) >= MPOL_MAX)
		return -EINVAL;
	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;

	return 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	start = untagged_addr(start);
	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_set_mempolicy(lmode, mode_flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}
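
/*
 * Illustrative userspace counterpart of the migrate_pages() syscall
 * handled below (hedged sketch; error handling elided). This moves the
 * pages of process "pid" from node 0 to node 1, as far as permissions
 * and movability allow; a positive return value is the number of pages
 * that could not be moved:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 */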

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}

/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr,
				unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < nr_node_ids)
		return -EINVAL;

	addr = untagged_addr(addr);

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		       compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;

		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return kernel_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
}

COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
		       compat_ulong_t, maxnode,
		       const compat_ulong_t __user *, old_nodes,
		       const compat_ulong_t __user *, new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
}

#endif /* CONFIG_COMPAT */

bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

	if (is_vm_hugetlb_page(vma) &&
		!hugepage_migration_supported(hstate_vma(vma)))
		return false;

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
			< policy_zone)
		return false;
	return true;
}

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * If policy->nodes has movable memory only, we apply the policy
	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
	 *
	 * policy->nodes is intersected with node_states[N_MEMORY], so if
	 * the following test fails, it implies policy->nodes has movable
	 * memory only.
	 */
	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	int mode = policy->mode;

	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(mode == MPOL_BIND) &&
		apply_policy_zone(policy, gfp_zone(gfp)) &&
		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
		return &policy->nodes;

	if (mode == MPOL_PREFERRED_MANY)
		return &policy->nodes;

	return NULL;
}

/*
 * Return the preferred node id for 'prefer' mempolicy, and return
 * the given id for all other policies.
 *
 * policy_node() is always coupled with policy_nodemask(), which
 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
 */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED) {
		nd = first_node(policy->nodes);
	} else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (!in_task())
		return node;

	policy = current->mempolicy;
	if (!policy)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		return first_node(policy->nodes);

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
	case MPOL_PREFERRED_MANY:
	{
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);

		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->nodes);
		return z->zone ? zone_to_nid(z->zone) : node;
	}
	case MPOL_LOCAL:
		return node;

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n. Returns the n'th
 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
	unsigned nnodes = nodes_weight(pol->nodes);
	unsigned target;
	int i;
	int nid;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(pol->nodes);
	for (i = 0; i < target; i++)
		nid = next_node(nid, pol->nodes);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}
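
/*
 * Worked example of the offset arithmetic above (illustrative numbers):
 * with 2 MB huge pages, shift == 21 and PAGE_SHIFT == 12, so a vma with
 * vm_pgoff == 0x400 (4 MB into the backing object) contributes
 * 0x400 >> 9 == 2 huge-page slots; an addr one huge page past vm_start
 * adds 1 more, giving off == 3. With an interleave mask of {0,2,4},
 * offset_il_node() walks 3 % 3 == 0 steps from the first node, so the
 * allocation lands on node 0.
 */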

#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
 * to the mempolicy's @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;
	int mode;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;
	mode = (*mpol)->mode;

	if (unlikely(mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
			*nodemask = &(*mpol)->nodes;
	}
	return nid;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*mask = mempolicy->nodes;
		break;

	case MPOL_LOCAL:
		init_nodemask_of_node(mask, numa_node_id());
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

#endif

/*
 * mempolicy_in_oom_domain
 *
 * If tsk's mempolicy is "bind", check for intersection between mask and
 * the policy nodemask.  Otherwise, return true for all other policies
 * including "interleave", as a tsk with "interleave" policy may have
 * memory allocated from any node in the system.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_in_oom_domain(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;

	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (mempolicy && mempolicy->mode == MPOL_BIND)
		ret = nodes_intersects(mempolicy->nodes, *mask);
	task_unlock(tsk);

	return ret;
}

/*
 * Allocate a page in interleaved policy.  Own path because it needs to do
 * special accounting.
 */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct page *page;

	page = __alloc_pages(gfp, order, nid, NULL);
	/* Skip the NUMA_INTERLEAVE_HIT counter update if NUMA stats are disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return page;
	if (page && page_to_nid(page) == nid) {
		preempt_disable();
		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
		preempt_enable();
	}
	return page;
}

static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
						int nid, struct mempolicy *pol)
{
	struct page *page;
	gfp_t preferred_gfp;

	/*
	 * This is a two pass approach: the first pass will only try the
	 * preferred nodes but skip direct reclaim and allow the allocation
	 * to fail, while the second pass will try all the nodes in the
	 * system.
	 */
	preferred_gfp = gfp | __GFP_NOWARN;
	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
	if (!page)
		page = __alloc_pages(gfp, order, numa_node_id(), NULL);

	return page;
}
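
/*
 * Example (illustrative): for a GFP_HIGHUSER caller the first pass above
 * becomes GFP_HIGHUSER | __GFP_NOWARN with __GFP_DIRECT_RECLAIM cleared,
 * i.e. it may still wake kswapd but can never stall in direct reclaim or
 * warn on failure; only the unrestricted second pass may directly reclaim.
 */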

/**
 * alloc_pages_vma - Allocate a page for a VMA.
 * @gfp: GFP flags.
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation.  Must be inside @vma.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: For hugepages try only the preferred node if possible.
 *
 * Allocate a page for a specific address in @vma, using the appropriate
 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
 * of the mm_struct of the VMA to prevent it from going away.  Should be
 * used for all allocations for pages that will be mapped into user space.
 *
 * Return: The page on success or NULL if allocation fails.
 */
struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node, bool hugepage)
{
	struct mempolicy *pol;
	struct page *page;
	int preferred_nid;
	nodemask_t *nmask;

	pol = get_vma_policy(vma, addr);

	if (pol->mode == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		goto out;
	}

	if (pol->mode == MPOL_PREFERRED_MANY) {
		page = alloc_pages_preferred_many(gfp, order, node, pol);
		mpol_cond_put(pol);
		goto out;
	}

	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
		int hpage_node = node;

		/*
		 * For hugepage allocation and non-interleave policy which
		 * allows the current node (or other explicitly preferred
		 * node) we only try to allocate from the current/preferred
		 * node and don't fall back to other nodes, as the cost of
		 * remote accesses would likely offset THP benefits.
		 *
		 * If the policy is interleave or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
		if (pol->mode == MPOL_PREFERRED)
			hpage_node = first_node(pol->nodes);

		nmask = policy_nodemask(gfp, pol);
		if (!nmask || node_isset(hpage_node, *nmask)) {
			mpol_cond_put(pol);
			/*
			 * First, try to allocate THP only on the local node,
			 * but don't reclaim unnecessarily, just compact.
			 */
			page = __alloc_pages_node(hpage_node,
				gfp | __GFP_THISNODE | __GFP_NORETRY, order);

			/*
			 * If hugepage allocations are configured to always
			 * use synchronous compaction, or the VMA has been
			 * madvised to prefer hugepage backing, retry allowing
			 * remote memory with both reclaim and compaction as
			 * well.
			 */
			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
				page = __alloc_pages_node(hpage_node,
								gfp, order);

			goto out;
		}
	}

	nmask = policy_nodemask(gfp, pol);
	preferred_nid = policy_node(gfp, pol, node);
	page = __alloc_pages(gfp, order, preferred_nid, nmask);
	mpol_cond_put(pol);
out:
	return page;
}
EXPORT_SYMBOL(alloc_pages_vma);
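
/*
 * Usage sketch (illustrative, not part of the original file): an anonymous
 * page fault handler would typically allocate with
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *
 * while holding the mmap_lock, letting the VMA's policy choose the node.
 */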

/**
 * alloc_pages - Allocate pages.
 * @gfp: GFP flags.
 * @order: Power of two of number of pages to allocate.
 *
 * Allocate 1 << @order contiguous pages.  The physical address of the
 * first page is naturally aligned (e.g. an order-3 allocation will be
 * aligned to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the
 * current process is honoured when in process context.
 *
 * Context: Can be called from any context, providing the appropriate GFP
 * flags are used.
 * Return: The page on success or NULL if allocation fails.
 */
struct page *alloc_pages(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else if (pol->mode == MPOL_PREFERRED_MANY)
		page = alloc_pages_preferred_many(gfp, order,
				numa_node_id(), pol);
	else
		page = __alloc_pages(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages);
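
/*
 * Example (illustrative): a plain order-2 allocation, i.e. 1 << 2 = 4
 * contiguous pages, that honours the calling task's mempolicy, paired
 * with its release:
 *
 *	struct page *pages = alloc_pages(GFP_KERNEL, 2);
 *	if (pages)
 *		__free_pages(pages, 2);
 */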

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset-relative after their cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that
 * changes the cpuset's mems), so we needn't do rebind work for the
 * current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		return !!nodes_equal(a->nodes, b->nodes);
	case MPOL_LOCAL:
		return true;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * Lookup the first element intersecting start-end.  Caller holds sp->lock
 * for reading or for writing.
 */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
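
/*
 * Worked example (illustrative): with stored ranges [0,4) and [4,10) in
 * the tree, sp_lookup(sp, 2, 6) first descends to *some* node intersecting
 * [2,6); the rb_prev() loop above then walks back so that the leftmost
 * intersecting node, here [0,4), is the one returned.
 */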

/*
 * Insert a new shared policy into the list.  Caller holds sp->lock for
 * writing.
 */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	read_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	read_unlock(&sp->lock);
	return pol;
}

static void sp_free(struct sp_node *n)
{
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.  Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 *
 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
 * policy, or a suitable node ID to allocate a replacement page from.
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = NUMA_NO_NODE;
	int ret = NUMA_NO_NODE;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (node_isset(curnid, pol->nodes))
			goto out;
		polnid = first_node(pol->nodes);
		break;

	case MPOL_LOCAL:
		polnid = numa_node_id();
		break;

	case MPOL_BIND:
		/* Optimize placement among multiple nodes via NUMA balancing */
		if (pol->flags & MPOL_F_MORON) {
			if (node_isset(thisnid, pol->nodes))
				break;
			goto out;
		}
		fallthrough;

	case MPOL_PREFERRED_MANY:
		/*
		 * Use the current page if it is in the policy nodemask,
		 * else select the nearest allowed node, if any.
		 * If no allowed nodes, use the current node [!misplaced].
		 */
		if (node_isset(curnid, pol->nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->nodes);
		polnid = zone_to_nid(z->zone);
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}
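
/*
 * Usage sketch (illustrative): a NUMA hinting fault handler consumes the
 * return value roughly as
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != NUMA_NO_NODE)
 *		migrate_misplaced_page(page, vma, target_nid);
 *
 * i.e. NUMA_NO_NODE means "leave the page where it is".
 */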

/*
 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;
}
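
/*
 * Worked example (illustrative): if the tree holds one node [0,10)->A and
 * shared_policy_replace() is called for [3,6) with new policy B, the old
 * node is trimmed to [0,3)->A, a duplicate [6,10)->A is inserted using the
 * n_new/mpol_new pre-allocation above, and [3,6)->B is inserted, leaving
 * [0,3)->A, [3,6)->B, [6,10)->A.
 */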

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called during get_inode(), so GFP_KERNEL allocations are fine.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		vma_init(&pvma, NULL);
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}
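
/*
 * Example (illustrative): mounting tmpfs with "mpol=interleave:0-3" causes
 * the parsed superblock mempolicy to be passed to mpol_shared_policy_init()
 * for each new inode, so later mappings of those files inherit the
 * interleave policy via the pseudo-vma installed above.
 */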

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */
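
/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line sets numabalancing_override to -1 above, which
 * check_numabalancing_enable() then applies regardless of
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */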

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.nodes = nodemask_of_node(nid),
		};
	}

	/*
	 * Set interleaving policy for system init.  Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), with a
	 * fallback to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

static const char * const policy_modes[] =
{
	[MPOL_DEFAULT] = "default",
	[MPOL_PREFERRED] = "prefer",
	[MPOL_BIND] = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL] = "local",
	[MPOL_PREFERRED_MANY] = "prefer (many)",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1, mode;

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	mode = match_string(policy_modes, MPOL_MAX, str);
	if (mode < 0)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only, although later
		 * we use first_node(nodes) to grab a single node, so here
		 * nodelist (or nodes) cannot be empty.
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
			if (nodes_empty(nodes))
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED) {
		new->nodes = nodes;
	} else if (nodelist) {
		nodes_clear(new->nodes);
		node_set(first_node(nodes), new->nodes);
	} else {
		new->mode = MPOL_LOCAL;
	}

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
	case MPOL_LOCAL:
		break;
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
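
/*
 * Examples (illustrative): mpol_parse_str() accepts strings such as
 * "interleave:0-3", "bind=static:0,2" or "prefer=relative:1", returning 0
 * and a policy on success and 1 on parse failure.  mpol_to_str() produces
 * the same "<mode>[=<flags>][:<nodelist>]" form, e.g. a bind policy over
 * nodes 0-3 with MPOL_F_STATIC_NODES formats as "bind=static:0-3".
 */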
"true" : "false"); 3087 } 3088 3089 static ssize_t numa_demotion_enabled_store(struct kobject *kobj, 3090 struct kobj_attribute *attr, 3091 const char *buf, size_t count) 3092 { 3093 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 3094 numa_demotion_enabled = true; 3095 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 3096 numa_demotion_enabled = false; 3097 else 3098 return -EINVAL; 3099 3100 return count; 3101 } 3102 3103 static struct kobj_attribute numa_demotion_enabled_attr = 3104 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show, 3105 numa_demotion_enabled_store); 3106 3107 static struct attribute *numa_attrs[] = { 3108 &numa_demotion_enabled_attr.attr, 3109 NULL, 3110 }; 3111 3112 static const struct attribute_group numa_attr_group = { 3113 .attrs = numa_attrs, 3114 }; 3115 3116 static int __init numa_init_sysfs(void) 3117 { 3118 int err; 3119 struct kobject *numa_kobj; 3120 3121 numa_kobj = kobject_create_and_add("numa", mm_kobj); 3122 if (!numa_kobj) { 3123 pr_err("failed to create numa kobject\n"); 3124 return -ENOMEM; 3125 } 3126 err = sysfs_create_group(numa_kobj, &numa_attr_group); 3127 if (err) { 3128 pr_err("failed to register numa group\n"); 3129 goto delete_obj; 3130 } 3131 return 0; 3132 3133 delete_obj: 3134 kobject_put(numa_kobj); 3135 return err; 3136 } 3137 subsys_initcall(numa_init_sysfs); 3138 #endif 3139