xref: /openbmc/linux/mm/mempolicy.c (revision ef4290e6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  * Support the following memory policies per VMA and per process:
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                offset into the backing object or offset into the mapping
20  *                for anonymous memory. For process policy a per-process
21  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred      Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * preferred many Try a set of nodes first before normal fallback. This is
35  *                similar to preferred without the special case.
36  *
37  * default        Allocate on the local node first, or when on a VMA
38  *                use the process policy. This is what Linux always did
39  *		  in a NUMA aware kernel and still does by, ahem, default.
40  *
41  * The process policy is applied for most non-interrupt memory allocations
42  * in that process' context. Interrupts ignore the policies and always
43  * try to allocate on the local CPU. The VMA policy is only applied for memory
44  * allocations for a VMA in the VM.
45  *
46  * Currently there are a few corner cases in swapping where the policy
47  * is not applied, but the majority should be handled. When process policy
48  * is used it is not remembered over swap outs/swap ins.
49  *
50  * Only the highest zone in the zone hierarchy gets policied. Allocations
51  * requesting a lower zone just use default policy. This implies that
52  * on systems with highmem, kernel lowmem allocations don't get policied.
53  * Same with GFP_DMA allocations.
54  *
55  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56  * all users and remembered even when nobody has memory mapped.
57  */
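
/*
 * Illustrative userspace sketch (not part of the kernel build): the policies
 * described above are normally installed with set_mempolicy(2) for the whole
 * process or mbind(2) for an address range, e.g. through the raw wrappers in
 * <numaif.h> from libnuma.  Node numbers and mask sizes are examples only,
 * and addr/length stand for an existing mapping.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave future allocations of this process across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// bind one existing mapping to node 0; MPOL_MF_STRICT makes the call
 *	// fail with EIO if already-allocated pages sit on other nodes
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_STRICT);
 */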
58 
59 /* Notebook:
60    fix mmap readahead to honour policy and enable policy for any page cache
61    object
62    statistics for bigpages
63    global policy for page cache? currently it uses process policy. Requires
64    first item above.
65    handle mremap for shared memory (currently ignored for the policy)
66    grows down?
67    make bind policy root only? It can trigger oom much faster and the
68    kernel is not always grateful with that.
69 */
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/mempolicy.h>
74 #include <linux/pagewalk.h>
75 #include <linux/highmem.h>
76 #include <linux/hugetlb.h>
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/sched/mm.h>
80 #include <linux/sched/numa_balancing.h>
81 #include <linux/sched/task.h>
82 #include <linux/nodemask.h>
83 #include <linux/cpuset.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/export.h>
87 #include <linux/nsproxy.h>
88 #include <linux/interrupt.h>
89 #include <linux/init.h>
90 #include <linux/compat.h>
91 #include <linux/ptrace.h>
92 #include <linux/swap.h>
93 #include <linux/seq_file.h>
94 #include <linux/proc_fs.h>
95 #include <linux/migrate.h>
96 #include <linux/ksm.h>
97 #include <linux/rmap.h>
98 #include <linux/security.h>
99 #include <linux/syscalls.h>
100 #include <linux/ctype.h>
101 #include <linux/mm_inline.h>
102 #include <linux/mmu_notifier.h>
103 #include <linux/printk.h>
104 #include <linux/swapops.h>
105 
106 #include <asm/tlbflush.h>
107 #include <asm/tlb.h>
108 #include <linux/uaccess.h>
109 
110 #include "internal.h"
111 
112 /* Internal flags */
113 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
114 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115 
116 static struct kmem_cache *policy_cache;
117 static struct kmem_cache *sn_cache;
118 
119 /* Highest zone. A specific allocation for a zone below that is not
120    policied. */
121 enum zone_type policy_zone = 0;
122 
123 /*
124  * run-time system-wide default policy => local allocation
125  */
126 static struct mempolicy default_policy = {
127 	.refcnt = ATOMIC_INIT(1), /* never free it */
128 	.mode = MPOL_LOCAL,
129 };
130 
131 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
132 
133 /**
134  * numa_map_to_online_node - Find closest online node
135  * @node: Node id to start the search
136  *
137  * Lookup the next closest node by distance if @node is not online.
138  *
139  * Return: this @node if it is online, otherwise the closest node by distance
140  */
141 int numa_map_to_online_node(int node)
142 {
143 	int min_dist = INT_MAX, dist, n, min_node;
144 
145 	if (node == NUMA_NO_NODE || node_online(node))
146 		return node;
147 
148 	min_node = node;
149 	for_each_online_node(n) {
150 		dist = node_distance(node, n);
151 		if (dist < min_dist) {
152 			min_dist = dist;
153 			min_node = n;
154 		}
155 	}
156 
157 	return min_node;
158 }
159 EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160 
161 struct mempolicy *get_task_policy(struct task_struct *p)
162 {
163 	struct mempolicy *pol = p->mempolicy;
164 	int node;
165 
166 	if (pol)
167 		return pol;
168 
169 	node = numa_node_id();
170 	if (node != NUMA_NO_NODE) {
171 		pol = &preferred_node_policy[node];
172 		/* preferred_node_policy is not initialised early in boot */
173 		if (pol->mode)
174 			return pol;
175 	}
176 
177 	return &default_policy;
178 }
179 
180 static const struct mempolicy_operations {
181 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
183 } mpol_ops[MPOL_MAX];
184 
185 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186 {
187 	return pol->flags & MPOL_MODE_FLAGS;
188 }
189 
190 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
191 				   const nodemask_t *rel)
192 {
193 	nodemask_t tmp;
194 	nodes_fold(tmp, *orig, nodes_weight(*rel));
195 	nodes_onto(*ret, tmp, *rel);
196 }
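
/*
 * Example (illustrative): with MPOL_F_RELATIVE_NODES, a user nodemask of
 * {0,2} relative to a cpuset that allows nodes {4,5,6} is first folded
 * modulo the three allowed nodes (still {0,2}) and then mapped onto the
 * allowed set, yielding an effective nodemask of {4,6}.
 */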
197 
198 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
199 {
200 	if (nodes_empty(*nodes))
201 		return -EINVAL;
202 	pol->nodes = *nodes;
203 	return 0;
204 }
205 
206 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
207 {
208 	if (nodes_empty(*nodes))
209 		return -EINVAL;
210 
211 	nodes_clear(pol->nodes);
212 	node_set(first_node(*nodes), pol->nodes);
213 	return 0;
214 }
215 
216 /*
217  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
218  * any, for the new policy.  mpol_new() has already validated the nodes
219  * parameter with respect to the policy mode and flags.
220  *
221  * Must be called holding task's alloc_lock to protect task's mems_allowed
222  * and mempolicy.  May also be called holding the mmap_lock for write.
223  */
224 static int mpol_set_nodemask(struct mempolicy *pol,
225 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
226 {
227 	int ret;
228 
229 	/*
230 	 * Default (pol==NULL) and local memory policies are not subject
231 	 * to any remapping. They also do not need any special
232 	 * constructor.
233 	 */
234 	if (!pol || pol->mode == MPOL_LOCAL)
235 		return 0;
236 
237 	/* Check N_MEMORY */
238 	nodes_and(nsc->mask1,
239 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
240 
241 	VM_BUG_ON(!nodes);
242 
243 	if (pol->flags & MPOL_F_RELATIVE_NODES)
244 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
245 	else
246 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
247 
248 	if (mpol_store_user_nodemask(pol))
249 		pol->w.user_nodemask = *nodes;
250 	else
251 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
252 
253 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
254 	return ret;
255 }
256 
257 /*
258  * This function just creates a new policy, does some checks and simple
259  * initialization. You must invoke mpol_set_nodemask() to set nodes.
260  */
261 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262 				  nodemask_t *nodes)
263 {
264 	struct mempolicy *policy;
265 
266 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
267 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268 
269 	if (mode == MPOL_DEFAULT) {
270 		if (nodes && !nodes_empty(*nodes))
271 			return ERR_PTR(-EINVAL);
272 		return NULL;
273 	}
274 	VM_BUG_ON(!nodes);
275 
276 	/*
277 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
278 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
279 	 * All other modes require a valid pointer to a non-empty nodemask.
280 	 */
281 	if (mode == MPOL_PREFERRED) {
282 		if (nodes_empty(*nodes)) {
283 			if (((flags & MPOL_F_STATIC_NODES) ||
284 			     (flags & MPOL_F_RELATIVE_NODES)))
285 				return ERR_PTR(-EINVAL);
286 
287 			mode = MPOL_LOCAL;
288 		}
289 	} else if (mode == MPOL_LOCAL) {
290 		if (!nodes_empty(*nodes) ||
291 		    (flags & MPOL_F_STATIC_NODES) ||
292 		    (flags & MPOL_F_RELATIVE_NODES))
293 			return ERR_PTR(-EINVAL);
294 	} else if (nodes_empty(*nodes))
295 		return ERR_PTR(-EINVAL);
296 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
297 	if (!policy)
298 		return ERR_PTR(-ENOMEM);
299 	atomic_set(&policy->refcnt, 1);
300 	policy->mode = mode;
301 	policy->flags = flags;
302 	policy->home_node = NUMA_NO_NODE;
303 
304 	return policy;
305 }
306 
307 /* Slow path of a mpol destructor. */
308 void __mpol_put(struct mempolicy *p)
309 {
310 	if (!atomic_dec_and_test(&p->refcnt))
311 		return;
312 	kmem_cache_free(policy_cache, p);
313 }
314 
315 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
316 {
317 }
318 
319 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
320 {
321 	nodemask_t tmp;
322 
323 	if (pol->flags & MPOL_F_STATIC_NODES)
324 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 	else {
328 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329 								*nodes);
330 		pol->w.cpuset_mems_allowed = *nodes;
331 	}
332 
333 	if (nodes_empty(tmp))
334 		tmp = *nodes;
335 
336 	pol->nodes = tmp;
337 }
338 
339 static void mpol_rebind_preferred(struct mempolicy *pol,
340 						const nodemask_t *nodes)
341 {
342 	pol->w.cpuset_mems_allowed = *nodes;
343 }
344 
345 /*
346  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347  *
348  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349  * policies are protected by task->mems_allowed_seq to prevent a premature
350  * OOM/allocation failure due to parallel nodemask modification.
351  */
352 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
353 {
354 	if (!pol || pol->mode == MPOL_LOCAL)
355 		return;
356 	if (!mpol_store_user_nodemask(pol) &&
357 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
358 		return;
359 
360 	mpol_ops[pol->mode].rebind(pol, newmask);
361 }
362 
363 /*
364  * Wrapper for mpol_rebind_policy() that just requires task
365  * pointer, and updates task mempolicy.
366  *
367  * Called with task's alloc_lock held.
368  */
369 
370 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
371 {
372 	mpol_rebind_policy(tsk->mempolicy, new);
373 }
374 
375 /*
376  * Rebind each vma in mm to new nodemask.
377  *
378  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
379  */
380 
381 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
382 {
383 	struct vm_area_struct *vma;
384 	VMA_ITERATOR(vmi, mm, 0);
385 
386 	mmap_write_lock(mm);
387 	for_each_vma(vmi, vma)
388 		mpol_rebind_policy(vma->vm_policy, new);
389 	mmap_write_unlock(mm);
390 }
391 
392 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
393 	[MPOL_DEFAULT] = {
394 		.rebind = mpol_rebind_default,
395 	},
396 	[MPOL_INTERLEAVE] = {
397 		.create = mpol_new_nodemask,
398 		.rebind = mpol_rebind_nodemask,
399 	},
400 	[MPOL_PREFERRED] = {
401 		.create = mpol_new_preferred,
402 		.rebind = mpol_rebind_preferred,
403 	},
404 	[MPOL_BIND] = {
405 		.create = mpol_new_nodemask,
406 		.rebind = mpol_rebind_nodemask,
407 	},
408 	[MPOL_LOCAL] = {
409 		.rebind = mpol_rebind_default,
410 	},
411 	[MPOL_PREFERRED_MANY] = {
412 		.create = mpol_new_nodemask,
413 		.rebind = mpol_rebind_preferred,
414 	},
415 };
416 
417 static int migrate_page_add(struct page *page, struct list_head *pagelist,
418 				unsigned long flags);
419 
420 struct queue_pages {
421 	struct list_head *pagelist;
422 	unsigned long flags;
423 	nodemask_t *nmask;
424 	unsigned long start;
425 	unsigned long end;
426 	struct vm_area_struct *first;
427 };
428 
429 /*
430  * Check if the page's nid is in qp->nmask.
431  *
432  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
433  * in the invert of qp->nmask.
434  */
435 static inline bool queue_pages_required(struct page *page,
436 					struct queue_pages *qp)
437 {
438 	int nid = page_to_nid(page);
439 	unsigned long flags = qp->flags;
440 
441 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
442 }
443 
444 /*
445  * queue_pages_pmd() has three possible return values:
446  * 0 - pages are placed on the right node or queued successfully, or a
447  *     special page was met, i.e. the huge zero page.
448  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449  *     specified.
450  * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
451  *        and an existing page was already on a node that does not follow
452  *        the policy.
453  */
454 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455 				unsigned long end, struct mm_walk *walk)
456 	__releases(ptl)
457 {
458 	int ret = 0;
459 	struct page *page;
460 	struct queue_pages *qp = walk->private;
461 	unsigned long flags;
462 
463 	if (unlikely(is_pmd_migration_entry(*pmd))) {
464 		ret = -EIO;
465 		goto unlock;
466 	}
467 	page = pmd_page(*pmd);
468 	if (is_huge_zero_page(page)) {
469 		walk->action = ACTION_CONTINUE;
470 		goto unlock;
471 	}
472 	if (!queue_pages_required(page, qp))
473 		goto unlock;
474 
475 	flags = qp->flags;
476 	/* go to thp migration */
477 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478 		if (!vma_migratable(walk->vma) ||
479 		    migrate_page_add(page, qp->pagelist, flags)) {
480 			ret = 1;
481 			goto unlock;
482 		}
483 	} else
484 		ret = -EIO;
485 unlock:
486 	spin_unlock(ptl);
487 	return ret;
488 }
489 
490 /*
491  * Scan through pages checking if pages follow certain conditions,
492  * and move them to the pagelist if they do.
493  *
494  * queue_pages_pte_range() has three possible return values:
495  * 0 - pages are placed on the right node or queued successfully, or a
496  *     special page was met, i.e. the zero page.
497  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498  *     specified.
499  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
500  *        on a node that does not follow the policy.
501  */
502 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
503 			unsigned long end, struct mm_walk *walk)
504 {
505 	struct vm_area_struct *vma = walk->vma;
506 	struct page *page;
507 	struct queue_pages *qp = walk->private;
508 	unsigned long flags = qp->flags;
509 	bool has_unmovable = false;
510 	pte_t *pte, *mapped_pte;
511 	spinlock_t *ptl;
512 
513 	ptl = pmd_trans_huge_lock(pmd, vma);
514 	if (ptl)
515 		return queue_pages_pmd(pmd, ptl, addr, end, walk);
516 
517 	if (pmd_trans_unstable(pmd))
518 		return 0;
519 
520 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
521 	for (; addr != end; pte++, addr += PAGE_SIZE) {
522 		if (!pte_present(*pte))
523 			continue;
524 		page = vm_normal_page(vma, addr, *pte);
525 		if (!page || is_zone_device_page(page))
526 			continue;
527 		/*
528 		 * vm_normal_page() filters out zero pages, but there might
529 		 * still be PageReserved pages to skip, perhaps in a VDSO.
530 		 */
531 		if (PageReserved(page))
532 			continue;
533 		if (!queue_pages_required(page, qp))
534 			continue;
535 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
536 			/* MPOL_MF_STRICT must be specified if we get here */
537 			if (!vma_migratable(vma)) {
538 				has_unmovable = true;
539 				break;
540 			}
541 
542 			/*
543 			 * Do not abort immediately since there may be
544 			 * pages temporarily off the LRU in the range.  We
545 			 * still need to migrate the other LRU pages.
546 			 */
547 			if (migrate_page_add(page, qp->pagelist, flags))
548 				has_unmovable = true;
549 		} else
550 			break;
551 	}
552 	pte_unmap_unlock(mapped_pte, ptl);
553 	cond_resched();
554 
555 	if (has_unmovable)
556 		return 1;
557 
558 	return addr != end ? -EIO : 0;
559 }
560 
561 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
562 			       unsigned long addr, unsigned long end,
563 			       struct mm_walk *walk)
564 {
565 	int ret = 0;
566 #ifdef CONFIG_HUGETLB_PAGE
567 	struct queue_pages *qp = walk->private;
568 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
569 	struct page *page;
570 	spinlock_t *ptl;
571 	pte_t entry;
572 
573 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
574 	entry = huge_ptep_get(pte);
575 	if (!pte_present(entry))
576 		goto unlock;
577 	page = pte_page(entry);
578 	if (!queue_pages_required(page, qp))
579 		goto unlock;
580 
581 	if (flags == MPOL_MF_STRICT) {
582 		/*
583 		 * STRICT alone means only detecting misplaced pages and no
584 		 * need to further check other vmas.
585 		 */
586 		ret = -EIO;
587 		goto unlock;
588 	}
589 
590 	if (!vma_migratable(walk->vma)) {
591 		/*
592 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
593 		 * stopped walking the current vma.
594 		 * A misplaced page was detected, but allow migrating pages which
595 		 * have already been queued.
596 		 */
597 		ret = 1;
598 		goto unlock;
599 	}
600 
601 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
602 	if (flags & (MPOL_MF_MOVE_ALL) ||
603 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
604 		if (isolate_hugetlb(page, qp->pagelist) &&
605 			(flags & MPOL_MF_STRICT))
606 			/*
607 			 * Failed to isolate page but allow migrating pages
608 			 * which have been queued.
609 			 */
610 			ret = 1;
611 	}
612 unlock:
613 	spin_unlock(ptl);
614 #else
615 	BUG();
616 #endif
617 	return ret;
618 }
619 
620 #ifdef CONFIG_NUMA_BALANCING
621 /*
622  * This is used to mark a range of virtual addresses as inaccessible.
623  * These are later cleared by a NUMA hinting fault. Depending on these
624  * faults, pages may be migrated for better NUMA placement.
625  *
626  * This is assuming that NUMA faults are handled using PROT_NONE. If
627  * an architecture makes a different choice, it will need further
628  * changes to the core.
629  */
630 unsigned long change_prot_numa(struct vm_area_struct *vma,
631 			unsigned long addr, unsigned long end)
632 {
633 	struct mmu_gather tlb;
634 	int nr_updated;
635 
636 	tlb_gather_mmu(&tlb, vma->vm_mm);
637 
638 	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
639 				       MM_CP_PROT_NUMA);
640 	if (nr_updated)
641 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
642 
643 	tlb_finish_mmu(&tlb);
644 
645 	return nr_updated;
646 }
647 #else
648 static unsigned long change_prot_numa(struct vm_area_struct *vma,
649 			unsigned long addr, unsigned long end)
650 {
651 	return 0;
652 }
653 #endif /* CONFIG_NUMA_BALANCING */
654 
655 static int queue_pages_test_walk(unsigned long start, unsigned long end,
656 				struct mm_walk *walk)
657 {
658 	struct vm_area_struct *next, *vma = walk->vma;
659 	struct queue_pages *qp = walk->private;
660 	unsigned long endvma = vma->vm_end;
661 	unsigned long flags = qp->flags;
662 
663 	/* range check first */
664 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
665 
666 	if (!qp->first) {
667 		qp->first = vma;
668 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
669 			(qp->start < vma->vm_start))
670 			/* hole at head side of range */
671 			return -EFAULT;
672 	}
673 	next = find_vma(vma->vm_mm, vma->vm_end);
674 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
675 		((vma->vm_end < qp->end) &&
676 		(!next || vma->vm_end < next->vm_start)))
677 		/* hole at middle or tail of range */
678 		return -EFAULT;
679 
680 	/*
681 	 * Need to check MPOL_MF_STRICT to return -EIO if possible,
682 	 * regardless of vma_migratable
683 	 */
684 	if (!vma_migratable(vma) &&
685 	    !(flags & MPOL_MF_STRICT))
686 		return 1;
687 
688 	if (endvma > end)
689 		endvma = end;
690 
691 	if (flags & MPOL_MF_LAZY) {
692 		/* Similar to task_numa_work, skip inaccessible VMAs */
693 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
694 			!(vma->vm_flags & VM_MIXEDMAP))
695 			change_prot_numa(vma, start, endvma);
696 		return 1;
697 	}
698 
699 	/* queue pages from current vma */
700 	if (flags & MPOL_MF_VALID)
701 		return 0;
702 	return 1;
703 }
704 
705 static const struct mm_walk_ops queue_pages_walk_ops = {
706 	.hugetlb_entry		= queue_pages_hugetlb,
707 	.pmd_entry		= queue_pages_pte_range,
708 	.test_walk		= queue_pages_test_walk,
709 };
710 
711 /*
712  * Walk through page tables and collect pages to be migrated.
713  *
714  * If pages found in a given range are on a set of nodes (determined by
715  * @nodes and @flags), they are isolated and queued to the pagelist which
716  * is passed via @private.
717  *
718  * queue_pages_range() has three possible return values:
719  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
720  *     specified.
721  * 0 - queue pages successfully or no misplaced page.
722  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
723  *         memory range specified by nodemask and maxnode points outside
724  *         your accessible address space (-EFAULT)
725  */
726 static int
727 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
728 		nodemask_t *nodes, unsigned long flags,
729 		struct list_head *pagelist)
730 {
731 	int err;
732 	struct queue_pages qp = {
733 		.pagelist = pagelist,
734 		.flags = flags,
735 		.nmask = nodes,
736 		.start = start,
737 		.end = end,
738 		.first = NULL,
739 	};
740 
741 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
742 
743 	if (!qp.first)
744 		/* whole range in hole */
745 		err = -EFAULT;
746 
747 	return err;
748 }
749 
750 /*
751  * Apply policy to a single VMA
752  * This must be called with the mmap_lock held for writing.
753  */
754 static int vma_replace_policy(struct vm_area_struct *vma,
755 						struct mempolicy *pol)
756 {
757 	int err;
758 	struct mempolicy *old;
759 	struct mempolicy *new;
760 
761 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
762 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
763 		 vma->vm_ops, vma->vm_file,
764 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
765 
766 	new = mpol_dup(pol);
767 	if (IS_ERR(new))
768 		return PTR_ERR(new);
769 
770 	if (vma->vm_ops && vma->vm_ops->set_policy) {
771 		err = vma->vm_ops->set_policy(vma, new);
772 		if (err)
773 			goto err_out;
774 	}
775 
776 	old = vma->vm_policy;
777 	vma->vm_policy = new; /* protected by mmap_lock */
778 	mpol_put(old);
779 
780 	return 0;
781  err_out:
782 	mpol_put(new);
783 	return err;
784 }
785 
786 /* Step 2: apply policy to a range and do splits. */
787 static int mbind_range(struct mm_struct *mm, unsigned long start,
788 		       unsigned long end, struct mempolicy *new_pol)
789 {
790 	MA_STATE(mas, &mm->mm_mt, start, start);
791 	struct vm_area_struct *prev;
792 	struct vm_area_struct *vma;
793 	int err = 0;
794 	pgoff_t pgoff;
795 
796 	prev = mas_prev(&mas, 0);
797 	if (unlikely(!prev))
798 		mas_set(&mas, start);
799 
800 	vma = mas_find(&mas, end - 1);
801 	if (WARN_ON(!vma))
802 		return 0;
803 
804 	if (start > vma->vm_start)
805 		prev = vma;
806 
807 	for (; vma; vma = mas_next(&mas, end - 1)) {
808 		unsigned long vmstart = max(start, vma->vm_start);
809 		unsigned long vmend = min(end, vma->vm_end);
810 
811 		if (mpol_equal(vma_policy(vma), new_pol))
812 			goto next;
813 
814 		pgoff = vma->vm_pgoff +
815 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
816 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
817 				 vma->anon_vma, vma->vm_file, pgoff,
818 				 new_pol, vma->vm_userfaultfd_ctx,
819 				 anon_vma_name(vma));
820 		if (prev) {
821 			/* vma_merge() invalidated the mas */
822 			mas_pause(&mas);
823 			vma = prev;
824 			goto replace;
825 		}
826 		if (vma->vm_start != vmstart) {
827 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
828 			if (err)
829 				goto out;
830 			/* split_vma() invalidated the mas */
831 			mas_pause(&mas);
832 		}
833 		if (vma->vm_end != vmend) {
834 			err = split_vma(vma->vm_mm, vma, vmend, 0);
835 			if (err)
836 				goto out;
837 			/* split_vma() invalidated the mas */
838 			mas_pause(&mas);
839 		}
840 replace:
841 		err = vma_replace_policy(vma, new_pol);
842 		if (err)
843 			goto out;
844 next:
845 		prev = vma;
846 	}
847 
848 out:
849 	return err;
850 }
851 
852 /* Set the process memory policy */
853 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
854 			     nodemask_t *nodes)
855 {
856 	struct mempolicy *new, *old;
857 	NODEMASK_SCRATCH(scratch);
858 	int ret;
859 
860 	if (!scratch)
861 		return -ENOMEM;
862 
863 	new = mpol_new(mode, flags, nodes);
864 	if (IS_ERR(new)) {
865 		ret = PTR_ERR(new);
866 		goto out;
867 	}
868 
869 	task_lock(current);
870 	ret = mpol_set_nodemask(new, nodes, scratch);
871 	if (ret) {
872 		task_unlock(current);
873 		mpol_put(new);
874 		goto out;
875 	}
876 
877 	old = current->mempolicy;
878 	current->mempolicy = new;
879 	if (new && new->mode == MPOL_INTERLEAVE)
880 		current->il_prev = MAX_NUMNODES-1;
881 	task_unlock(current);
882 	mpol_put(old);
883 	ret = 0;
884 out:
885 	NODEMASK_SCRATCH_FREE(scratch);
886 	return ret;
887 }
888 
889 /*
890  * Return nodemask for policy for get_mempolicy() query
891  *
892  * Called with task's alloc_lock held
893  */
894 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
895 {
896 	nodes_clear(*nodes);
897 	if (p == &default_policy)
898 		return;
899 
900 	switch (p->mode) {
901 	case MPOL_BIND:
902 	case MPOL_INTERLEAVE:
903 	case MPOL_PREFERRED:
904 	case MPOL_PREFERRED_MANY:
905 		*nodes = p->nodes;
906 		break;
907 	case MPOL_LOCAL:
908 		/* return empty node mask for local allocation */
909 		break;
910 	default:
911 		BUG();
912 	}
913 }
914 
915 static int lookup_node(struct mm_struct *mm, unsigned long addr)
916 {
917 	struct page *p = NULL;
918 	int ret;
919 
920 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
921 	if (ret > 0) {
922 		ret = page_to_nid(p);
923 		put_page(p);
924 	}
925 	return ret;
926 }
927 
928 /* Retrieve NUMA policy */
929 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
930 			     unsigned long addr, unsigned long flags)
931 {
932 	int err;
933 	struct mm_struct *mm = current->mm;
934 	struct vm_area_struct *vma = NULL;
935 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
936 
937 	if (flags &
938 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
939 		return -EINVAL;
940 
941 	if (flags & MPOL_F_MEMS_ALLOWED) {
942 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
943 			return -EINVAL;
944 		*policy = 0;	/* just so it's initialized */
945 		task_lock(current);
946 		*nmask  = cpuset_current_mems_allowed;
947 		task_unlock(current);
948 		return 0;
949 	}
950 
951 	if (flags & MPOL_F_ADDR) {
952 		/*
953 		 * Do NOT fall back to task policy if the
954 		 * vma/shared policy at addr is NULL.  We
955 		 * want to return MPOL_DEFAULT in this case.
956 		 */
957 		mmap_read_lock(mm);
958 		vma = vma_lookup(mm, addr);
959 		if (!vma) {
960 			mmap_read_unlock(mm);
961 			return -EFAULT;
962 		}
963 		if (vma->vm_ops && vma->vm_ops->get_policy)
964 			pol = vma->vm_ops->get_policy(vma, addr);
965 		else
966 			pol = vma->vm_policy;
967 	} else if (addr)
968 		return -EINVAL;
969 
970 	if (!pol)
971 		pol = &default_policy;	/* indicates default behavior */
972 
973 	if (flags & MPOL_F_NODE) {
974 		if (flags & MPOL_F_ADDR) {
975 			/*
976 			 * Take a refcount on the mpol, because we are about to
977 			 * drop the mmap_lock, after which only "pol" remains
978 			 * valid, "vma" is stale.
979 			 */
980 			pol_refcount = pol;
981 			vma = NULL;
982 			mpol_get(pol);
983 			mmap_read_unlock(mm);
984 			err = lookup_node(mm, addr);
985 			if (err < 0)
986 				goto out;
987 			*policy = err;
988 		} else if (pol == current->mempolicy &&
989 				pol->mode == MPOL_INTERLEAVE) {
990 			*policy = next_node_in(current->il_prev, pol->nodes);
991 		} else {
992 			err = -EINVAL;
993 			goto out;
994 		}
995 	} else {
996 		*policy = pol == &default_policy ? MPOL_DEFAULT :
997 						pol->mode;
998 		/*
999 		 * Internal mempolicy flags must be masked off before exposing
1000 		 * the policy to userspace.
1001 		 */
1002 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1003 	}
1004 
1005 	err = 0;
1006 	if (nmask) {
1007 		if (mpol_store_user_nodemask(pol)) {
1008 			*nmask = pol->w.user_nodemask;
1009 		} else {
1010 			task_lock(current);
1011 			get_policy_nodemask(pol, nmask);
1012 			task_unlock(current);
1013 		}
1014 	}
1015 
1016  out:
1017 	mpol_cond_put(pol);
1018 	if (vma)
1019 		mmap_read_unlock(mm);
1020 	if (pol_refcount)
1021 		mpol_put(pol_refcount);
1022 	return err;
1023 }
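
/*
 * Illustrative userspace sketch (not part of the kernel build): the query
 * side is exposed as get_mempolicy(2).  For instance, MPOL_F_ADDR together
 * with MPOL_F_NODE reports the node backing a given address:
 *
 *	#include <numaif.h>
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_ADDR | MPOL_F_NODE) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */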
1024 
1025 #ifdef CONFIG_MIGRATION
1026 /*
1027  * page migration, thp tail pages can be passed.
1028  */
1029 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1030 				unsigned long flags)
1031 {
1032 	struct page *head = compound_head(page);
1033 	/*
1034 	 * Avoid migrating a page that is shared with others.
1035 	 */
1036 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1037 		if (!isolate_lru_page(head)) {
1038 			list_add_tail(&head->lru, pagelist);
1039 			mod_node_page_state(page_pgdat(head),
1040 				NR_ISOLATED_ANON + page_is_file_lru(head),
1041 				thp_nr_pages(head));
1042 		} else if (flags & MPOL_MF_STRICT) {
1043 			/*
1044 			 * Non-movable page may reach here.  And, there may be
1045 			 * temporary off LRU pages or non-LRU movable pages.
1046 			 * Treat them as unmovable pages since they can't be
1047 			 * isolated, so they can't be moved at the moment.  It
1048 			 * should return -EIO for this case too.
1049 			 */
1050 			return -EIO;
1051 		}
1052 	}
1053 
1054 	return 0;
1055 }
1056 
1057 /*
1058  * Migrate pages from one node to a target node.
1059  * Returns error or the number of pages not migrated.
1060  */
1061 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1062 			   int flags)
1063 {
1064 	nodemask_t nmask;
1065 	struct vm_area_struct *vma;
1066 	LIST_HEAD(pagelist);
1067 	int err = 0;
1068 	struct migration_target_control mtc = {
1069 		.nid = dest,
1070 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1071 	};
1072 
1073 	nodes_clear(nmask);
1074 	node_set(source, nmask);
1075 
1076 	/*
1077 	 * This does not "check" the range but isolates all pages that
1078 	 * need migration.  Between passing in the full user address
1079 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1080 	 */
1081 	vma = find_vma(mm, 0);
1082 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1083 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1084 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1085 
1086 	if (!list_empty(&pagelist)) {
1087 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1088 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1089 		if (err)
1090 			putback_movable_pages(&pagelist);
1091 	}
1092 
1093 	return err;
1094 }
1095 
1096 /*
1097  * Move pages between the two nodesets so as to preserve the physical
1098  * layout as much as possible.
1099  *
1100  * Returns the number of pages that could not be moved.
1101  */
1102 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1103 		     const nodemask_t *to, int flags)
1104 {
1105 	int busy = 0;
1106 	int err = 0;
1107 	nodemask_t tmp;
1108 
1109 	lru_cache_disable();
1110 
1111 	mmap_read_lock(mm);
1112 
1113 	/*
1114 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1115 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1116 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1117 	 * The pair of nodemasks 'to' and 'from' define the map.
1118 	 *
1119 	 * If no pair of bits is found that way, fallback to picking some
1120 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1121 	 * 'source' and 'dest' bits are the same, this represents a node
1122 	 * that will be migrating to itself, so no pages need move.
1123 	 *
1124 	 * If no bits are left in 'tmp', or if all remaining bits left
1125 	 * in 'tmp' correspond to the same bit in 'to', return false
1126 	 * (nothing left to migrate).
1127 	 *
1128 	 * This lets us pick a pair of nodes to migrate between, such that
1129 	 * if possible the dest node is not already occupied by some other
1130 	 * source node, minimizing the risk of overloading the memory on a
1131 	 * node that would happen if we migrated incoming memory to a node
1132 	 * before migrating the outgoing memory from that same node.
1133 	 *
1134 	 * A single scan of tmp is sufficient.  As we go, we remember the
1135 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1136 	 * that not only moved, but what's better, moved to an empty slot
1137 	 * (d is not set in tmp), then we break out then, with that pair.
1138 	 * Otherwise when we finish scanning tmp, we at least have the
1139 	 * most recent <s, d> pair that moved.  If we get all the way through
1140 	 * the scan of tmp without finding any node that moved, much less
1141 	 * moved to an empty node, then there is nothing left worth migrating.
1142 	 */
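	/*
	 * Illustrative trace: for from = {2,3,4} and to = {3,4,5} the loop
	 * below first migrates 4 -> 5 (node 5 is not in tmp, so the target
	 * is "empty"), then 3 -> 4, then 2 -> 3, draining each destination
	 * before new pages are moved onto it.
	 */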
1143 
1144 	tmp = *from;
1145 	while (!nodes_empty(tmp)) {
1146 		int s, d;
1147 		int source = NUMA_NO_NODE;
1148 		int dest = 0;
1149 
1150 		for_each_node_mask(s, tmp) {
1151 
1152 			/*
1153 			 * do_migrate_pages() tries to maintain the relative
1154 			 * node relationship of the pages established between
1155 			 * threads and memory areas.
1156 			 *
1157 			 * However if the number of source nodes is not equal to
1158 			 * the number of destination nodes we can not preserve
1159 			 * this node relative relationship.  In that case, skip
1160 			 * copying memory from a node that is in the destination
1161 			 * mask.
1162 			 *
1163 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1164 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1165 			 */
1166 
1167 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1168 						(node_isset(s, *to)))
1169 				continue;
1170 
1171 			d = node_remap(s, *from, *to);
1172 			if (s == d)
1173 				continue;
1174 
1175 			source = s;	/* Node moved. Memorize */
1176 			dest = d;
1177 
1178 			/* dest not in remaining from nodes? */
1179 			if (!node_isset(dest, tmp))
1180 				break;
1181 		}
1182 		if (source == NUMA_NO_NODE)
1183 			break;
1184 
1185 		node_clear(source, tmp);
1186 		err = migrate_to_node(mm, source, dest, flags);
1187 		if (err > 0)
1188 			busy += err;
1189 		if (err < 0)
1190 			break;
1191 	}
1192 	mmap_read_unlock(mm);
1193 
1194 	lru_cache_enable();
1195 	if (err < 0)
1196 		return err;
1197 	return busy;
1198 
1199 }
1200 
1201 /*
1202  * Allocate a new page for page migration based on vma policy.
1203  * Start by assuming the page is mapped by the same vma that contains @start.
1204  * Search forward from there, if not.  N.B., this assumes that the
1205  * list of pages handed to migrate_pages()--which is how we get here--
1206  * is in virtual address order.
1207  */
1208 static struct page *new_page(struct page *page, unsigned long start)
1209 {
1210 	struct folio *dst, *src = page_folio(page);
1211 	struct vm_area_struct *vma;
1212 	unsigned long address;
1213 	VMA_ITERATOR(vmi, current->mm, start);
1214 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
1215 
1216 	for_each_vma(vmi, vma) {
1217 		address = page_address_in_vma(page, vma);
1218 		if (address != -EFAULT)
1219 			break;
1220 	}
1221 
1222 	if (folio_test_hugetlb(src))
1223 		return alloc_huge_page_vma(page_hstate(&src->page),
1224 				vma, address);
1225 
1226 	if (folio_test_large(src))
1227 		gfp = GFP_TRANSHUGE;
1228 
1229 	/*
1230 	 * if !vma, vma_alloc_folio() will use task or system default policy
1231 	 */
1232 	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1233 			folio_test_large(src));
1234 	return &dst->page;
1235 }
1236 #else
1237 
1238 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1239 				unsigned long flags)
1240 {
1241 	return -EIO;
1242 }
1243 
1244 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1245 		     const nodemask_t *to, int flags)
1246 {
1247 	return -ENOSYS;
1248 }
1249 
1250 static struct page *new_page(struct page *page, unsigned long start)
1251 {
1252 	return NULL;
1253 }
1254 #endif
1255 
1256 static long do_mbind(unsigned long start, unsigned long len,
1257 		     unsigned short mode, unsigned short mode_flags,
1258 		     nodemask_t *nmask, unsigned long flags)
1259 {
1260 	struct mm_struct *mm = current->mm;
1261 	struct mempolicy *new;
1262 	unsigned long end;
1263 	int err;
1264 	int ret;
1265 	LIST_HEAD(pagelist);
1266 
1267 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1268 		return -EINVAL;
1269 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1270 		return -EPERM;
1271 
1272 	if (start & ~PAGE_MASK)
1273 		return -EINVAL;
1274 
1275 	if (mode == MPOL_DEFAULT)
1276 		flags &= ~MPOL_MF_STRICT;
1277 
1278 	len = PAGE_ALIGN(len);
1279 	end = start + len;
1280 
1281 	if (end < start)
1282 		return -EINVAL;
1283 	if (end == start)
1284 		return 0;
1285 
1286 	new = mpol_new(mode, mode_flags, nmask);
1287 	if (IS_ERR(new))
1288 		return PTR_ERR(new);
1289 
1290 	if (flags & MPOL_MF_LAZY)
1291 		new->flags |= MPOL_F_MOF;
1292 
1293 	/*
1294 	 * If we are using the default policy then operation
1295 	 * on discontinuous address spaces is okay after all
1296 	 */
1297 	if (!new)
1298 		flags |= MPOL_MF_DISCONTIG_OK;
1299 
1300 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1301 		 start, start + len, mode, mode_flags,
1302 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1303 
1304 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1305 
1306 		lru_cache_disable();
1307 	}
1308 	{
1309 		NODEMASK_SCRATCH(scratch);
1310 		if (scratch) {
1311 			mmap_write_lock(mm);
1312 			err = mpol_set_nodemask(new, nmask, scratch);
1313 			if (err)
1314 				mmap_write_unlock(mm);
1315 		} else
1316 			err = -ENOMEM;
1317 		NODEMASK_SCRATCH_FREE(scratch);
1318 	}
1319 	if (err)
1320 		goto mpol_out;
1321 
1322 	ret = queue_pages_range(mm, start, end, nmask,
1323 			  flags | MPOL_MF_INVERT, &pagelist);
1324 
1325 	if (ret < 0) {
1326 		err = ret;
1327 		goto up_out;
1328 	}
1329 
1330 	err = mbind_range(mm, start, end, new);
1331 
1332 	if (!err) {
1333 		int nr_failed = 0;
1334 
1335 		if (!list_empty(&pagelist)) {
1336 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1337 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1338 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1339 			if (nr_failed)
1340 				putback_movable_pages(&pagelist);
1341 		}
1342 
1343 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1344 			err = -EIO;
1345 	} else {
1346 up_out:
1347 		if (!list_empty(&pagelist))
1348 			putback_movable_pages(&pagelist);
1349 	}
1350 
1351 	mmap_write_unlock(mm);
1352 mpol_out:
1353 	mpol_put(new);
1354 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1355 		lru_cache_enable();
1356 	return err;
1357 }
1358 
1359 /*
1360  * User space interface with variable sized bitmaps for nodelists.
1361  */
1362 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1363 		      unsigned long maxnode)
1364 {
1365 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1366 	int ret;
1367 
1368 	if (in_compat_syscall())
1369 		ret = compat_get_bitmap(mask,
1370 					(const compat_ulong_t __user *)nmask,
1371 					maxnode);
1372 	else
1373 		ret = copy_from_user(mask, nmask,
1374 				     nlongs * sizeof(unsigned long));
1375 
1376 	if (ret)
1377 		return -EFAULT;
1378 
1379 	if (maxnode % BITS_PER_LONG)
1380 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1381 
1382 	return 0;
1383 }
1384 
1385 /* Copy a node mask from user space. */
1386 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1387 		     unsigned long maxnode)
1388 {
1389 	--maxnode;
1390 	nodes_clear(*nodes);
1391 	if (maxnode == 0 || !nmask)
1392 		return 0;
1393 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1394 		return -EINVAL;
1395 
1396 	/*
1397 	 * When the user specified more nodes than supported, just check
1398 	 * that the unsupported part is all zero, one word at a time,
1399 	 * starting at the end.
1400 	 */
1401 	while (maxnode > MAX_NUMNODES) {
1402 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1403 		unsigned long t;
1404 
1405 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1406 			return -EFAULT;
1407 
1408 		if (maxnode - bits >= MAX_NUMNODES) {
1409 			maxnode -= bits;
1410 		} else {
1411 			maxnode = MAX_NUMNODES;
1412 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1413 		}
1414 		if (t)
1415 			return -EINVAL;
1416 	}
1417 
1418 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1419 }
1420 
1421 /* Copy a kernel node mask to user space */
1422 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1423 			      nodemask_t *nodes)
1424 {
1425 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1426 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1427 	bool compat = in_compat_syscall();
1428 
1429 	if (compat)
1430 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1431 
1432 	if (copy > nbytes) {
1433 		if (copy > PAGE_SIZE)
1434 			return -EINVAL;
1435 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1436 			return -EFAULT;
1437 		copy = nbytes;
1438 		maxnode = nr_node_ids;
1439 	}
1440 
1441 	if (compat)
1442 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1443 					 nodes_addr(*nodes), maxnode);
1444 
1445 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1446 }
1447 
1448 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1449 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1450 {
1451 	*flags = *mode & MPOL_MODE_FLAGS;
1452 	*mode &= ~MPOL_MODE_FLAGS;
1453 
1454 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1455 		return -EINVAL;
1456 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1457 		return -EINVAL;
1458 	if (*flags & MPOL_F_NUMA_BALANCING) {
1459 		if (*mode != MPOL_BIND)
1460 			return -EINVAL;
1461 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1462 	}
1463 	return 0;
1464 }
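
/*
 * Illustrative (userspace side, not part of the kernel build): the mode
 * argument of set_mempolicy(2)/mbind(2) carries these optional flags in its
 * upper bits, e.g.:
 *
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, &nodes, maxnode);
 *	set_mempolicy(MPOL_BIND | MPOL_F_NUMA_BALANCING, &nodes, maxnode);
 *
 * sanitize_mpol_flags() above splits mode and flags apart and rejects
 * invalid combinations, e.g. MPOL_F_NUMA_BALANCING with any mode other
 * than MPOL_BIND.
 */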
1465 
1466 static long kernel_mbind(unsigned long start, unsigned long len,
1467 			 unsigned long mode, const unsigned long __user *nmask,
1468 			 unsigned long maxnode, unsigned int flags)
1469 {
1470 	unsigned short mode_flags;
1471 	nodemask_t nodes;
1472 	int lmode = mode;
1473 	int err;
1474 
1475 	start = untagged_addr(start);
1476 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1477 	if (err)
1478 		return err;
1479 
1480 	err = get_nodes(&nodes, nmask, maxnode);
1481 	if (err)
1482 		return err;
1483 
1484 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1485 }
1486 
1487 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1488 		unsigned long, home_node, unsigned long, flags)
1489 {
1490 	struct mm_struct *mm = current->mm;
1491 	struct vm_area_struct *vma;
1492 	struct mempolicy *new;
1493 	unsigned long vmstart;
1494 	unsigned long vmend;
1495 	unsigned long end;
1496 	int err = -ENOENT;
1497 	VMA_ITERATOR(vmi, mm, start);
1498 
1499 	start = untagged_addr(start);
1500 	if (start & ~PAGE_MASK)
1501 		return -EINVAL;
1502 	/*
1503 	 * flags is reserved for future extensions and must be zero for now.
1504 	 */
1505 	if (flags != 0)
1506 		return -EINVAL;
1507 
1508 	/*
1509 	 * Check home_node is online to avoid accessing uninitialized
1510 	 * NODE_DATA.
1511 	 */
1512 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1513 		return -EINVAL;
1514 
1515 	len = PAGE_ALIGN(len);
1516 	end = start + len;
1517 
1518 	if (end < start)
1519 		return -EINVAL;
1520 	if (end == start)
1521 		return 0;
1522 	mmap_write_lock(mm);
1523 	for_each_vma_range(vmi, vma, end) {
1524 		vmstart = max(start, vma->vm_start);
1525 		vmend   = min(end, vma->vm_end);
1526 		new = mpol_dup(vma_policy(vma));
1527 		if (IS_ERR(new)) {
1528 			err = PTR_ERR(new);
1529 			break;
1530 		}
1531 		/*
1532 		 * Only update home node if there is an existing vma policy
1533 		 */
1534 		if (!new)
1535 			continue;
1536 
1537 		/*
1538 	 * If any vma in the range has a policy other than MPOL_BIND
1539 	 * or MPOL_PREFERRED_MANY we return an error. We don't reset
1540 	 * the home node for vmas we have already updated before.
1541 		 */
1542 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1543 			mpol_put(new);
1544 			err = -EOPNOTSUPP;
1545 			break;
1546 		}
1547 
1548 		new->home_node = home_node;
1549 		err = mbind_range(mm, vmstart, vmend, new);
1550 		mpol_put(new);
1551 		if (err)
1552 			break;
1553 	}
1554 	mmap_write_unlock(mm);
1555 	return err;
1556 }
1557 
1558 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1559 		unsigned long, mode, const unsigned long __user *, nmask,
1560 		unsigned long, maxnode, unsigned int, flags)
1561 {
1562 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1563 }
1564 
1565 /* Set the process memory policy */
1566 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1567 				 unsigned long maxnode)
1568 {
1569 	unsigned short mode_flags;
1570 	nodemask_t nodes;
1571 	int lmode = mode;
1572 	int err;
1573 
1574 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1575 	if (err)
1576 		return err;
1577 
1578 	err = get_nodes(&nodes, nmask, maxnode);
1579 	if (err)
1580 		return err;
1581 
1582 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1583 }
1584 
1585 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1586 		unsigned long, maxnode)
1587 {
1588 	return kernel_set_mempolicy(mode, nmask, maxnode);
1589 }
1590 
1591 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1592 				const unsigned long __user *old_nodes,
1593 				const unsigned long __user *new_nodes)
1594 {
1595 	struct mm_struct *mm = NULL;
1596 	struct task_struct *task;
1597 	nodemask_t task_nodes;
1598 	int err;
1599 	nodemask_t *old;
1600 	nodemask_t *new;
1601 	NODEMASK_SCRATCH(scratch);
1602 
1603 	if (!scratch)
1604 		return -ENOMEM;
1605 
1606 	old = &scratch->mask1;
1607 	new = &scratch->mask2;
1608 
1609 	err = get_nodes(old, old_nodes, maxnode);
1610 	if (err)
1611 		goto out;
1612 
1613 	err = get_nodes(new, new_nodes, maxnode);
1614 	if (err)
1615 		goto out;
1616 
1617 	/* Find the mm_struct */
1618 	rcu_read_lock();
1619 	task = pid ? find_task_by_vpid(pid) : current;
1620 	if (!task) {
1621 		rcu_read_unlock();
1622 		err = -ESRCH;
1623 		goto out;
1624 	}
1625 	get_task_struct(task);
1626 
1627 	err = -EINVAL;
1628 
1629 	/*
1630 	 * Check if this process has the right to modify the specified process.
1631 	 * Use the regular "ptrace_may_access()" checks.
1632 	 */
1633 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1634 		rcu_read_unlock();
1635 		err = -EPERM;
1636 		goto out_put;
1637 	}
1638 	rcu_read_unlock();
1639 
1640 	task_nodes = cpuset_mems_allowed(task);
1641 	/* Is the user allowed to access the target nodes? */
1642 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1643 		err = -EPERM;
1644 		goto out_put;
1645 	}
1646 
1647 	task_nodes = cpuset_mems_allowed(current);
1648 	nodes_and(*new, *new, task_nodes);
1649 	if (nodes_empty(*new))
1650 		goto out_put;
1651 
1652 	err = security_task_movememory(task);
1653 	if (err)
1654 		goto out_put;
1655 
1656 	mm = get_task_mm(task);
1657 	put_task_struct(task);
1658 
1659 	if (!mm) {
1660 		err = -EINVAL;
1661 		goto out;
1662 	}
1663 
1664 	err = do_migrate_pages(mm, old, new,
1665 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1666 
1667 	mmput(mm);
1668 out:
1669 	NODEMASK_SCRATCH_FREE(scratch);
1670 
1671 	return err;
1672 
1673 out_put:
1674 	put_task_struct(task);
1675 	goto out;
1676 
1677 }
1678 
1679 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1680 		const unsigned long __user *, old_nodes,
1681 		const unsigned long __user *, new_nodes)
1682 {
1683 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1684 }
1685 
1686 
1687 /* Retrieve NUMA policy */
1688 static int kernel_get_mempolicy(int __user *policy,
1689 				unsigned long __user *nmask,
1690 				unsigned long maxnode,
1691 				unsigned long addr,
1692 				unsigned long flags)
1693 {
1694 	int err;
1695 	int pval;
1696 	nodemask_t nodes;
1697 
1698 	if (nmask != NULL && maxnode < nr_node_ids)
1699 		return -EINVAL;
1700 
1701 	addr = untagged_addr(addr);
1702 
1703 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1704 
1705 	if (err)
1706 		return err;
1707 
1708 	if (policy && put_user(pval, policy))
1709 		return -EFAULT;
1710 
1711 	if (nmask)
1712 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1713 
1714 	return err;
1715 }
1716 
1717 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1718 		unsigned long __user *, nmask, unsigned long, maxnode,
1719 		unsigned long, addr, unsigned long, flags)
1720 {
1721 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1722 }
1723 
1724 bool vma_migratable(struct vm_area_struct *vma)
1725 {
1726 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1727 		return false;
1728 
1729 	/*
1730 	 * DAX device mappings require predictable access latency, so avoid
1731 	 * incurring periodic faults.
1732 	 */
1733 	if (vma_is_dax(vma))
1734 		return false;
1735 
1736 	if (is_vm_hugetlb_page(vma) &&
1737 		!hugepage_migration_supported(hstate_vma(vma)))
1738 		return false;
1739 
1740 	/*
1741 	 * Migration allocates pages in the highest zone. If we cannot
1742 	 * do so then migration (at least from node to node) is not
1743 	 * possible.
1744 	 */
1745 	if (vma->vm_file &&
1746 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1747 			< policy_zone)
1748 		return false;
1749 	return true;
1750 }
1751 
1752 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1753 						unsigned long addr)
1754 {
1755 	struct mempolicy *pol = NULL;
1756 
1757 	if (vma) {
1758 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1759 			pol = vma->vm_ops->get_policy(vma, addr);
1760 		} else if (vma->vm_policy) {
1761 			pol = vma->vm_policy;
1762 
1763 			/*
1764 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1765 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1766 			 * count on these policies which will be dropped by
1767 			 * mpol_cond_put() later
1768 			 */
1769 			if (mpol_needs_cond_ref(pol))
1770 				mpol_get(pol);
1771 		}
1772 	}
1773 
1774 	return pol;
1775 }
1776 
1777 /*
1778  * get_vma_policy(@vma, @addr)
1779  * @vma: virtual memory area whose policy is sought
1780  * @addr: address in @vma for shared policy lookup
1781  *
1782  * Returns effective policy for a VMA at specified address.
1783  * Falls back to current->mempolicy or system default policy, as necessary.
1784  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1785  * count--added by the get_policy() vm_op, as appropriate--to protect against
1786  * freeing by another task.  It is the caller's responsibility to free the
1787  * extra reference for shared policies.
1788  */
1789 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1790 						unsigned long addr)
1791 {
1792 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1793 
1794 	if (!pol)
1795 		pol = get_task_policy(current);
1796 
1797 	return pol;
1798 }
1799 
1800 bool vma_policy_mof(struct vm_area_struct *vma)
1801 {
1802 	struct mempolicy *pol;
1803 
1804 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1805 		bool ret = false;
1806 
1807 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1808 		if (pol && (pol->flags & MPOL_F_MOF))
1809 			ret = true;
1810 		mpol_cond_put(pol);
1811 
1812 		return ret;
1813 	}
1814 
1815 	pol = vma->vm_policy;
1816 	if (!pol)
1817 		pol = get_task_policy(current);
1818 
1819 	return pol->flags & MPOL_F_MOF;
1820 }
1821 
1822 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1823 {
1824 	enum zone_type dynamic_policy_zone = policy_zone;
1825 
1826 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1827 
1828 	/*
1829 	 * if policy->nodes has movable memory only,
1830 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1831 	 *
1832 	 * policy->nodes is intersected with node_states[N_MEMORY],
1833 	 * so if the following test fails, it implies
1834 	 * policy->nodes has movable memory only.
1835 	 */
1836 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1837 		dynamic_policy_zone = ZONE_MOVABLE;
1838 
1839 	return zone >= dynamic_policy_zone;
1840 }
1841 
1842 /*
1843  * Return a nodemask representing a mempolicy for filtering nodes for
1844  * page allocation
1845  */
1846 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1847 {
1848 	int mode = policy->mode;
1849 
1850 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1851 	if (unlikely(mode == MPOL_BIND) &&
1852 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1853 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1854 		return &policy->nodes;
1855 
1856 	if (mode == MPOL_PREFERRED_MANY)
1857 		return &policy->nodes;
1858 
1859 	return NULL;
1860 }
1861 
1862 /*
1863  * Return the preferred node id for 'prefer' mempolicy, and return
1864  * the given id for all other policies.
1865  *
1866  * policy_node() is always coupled with policy_nodemask(), which
1867  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1868  */
1869 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1870 {
1871 	if (policy->mode == MPOL_PREFERRED) {
1872 		nd = first_node(policy->nodes);
1873 	} else {
1874 		/*
1875 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1876 		 * because we might easily break the expectation to stay on the
1877 		 * requested node and not break the policy.
1878 		 */
1879 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1880 	}
1881 
1882 	if ((policy->mode == MPOL_BIND ||
1883 	     policy->mode == MPOL_PREFERRED_MANY) &&
1884 	    policy->home_node != NUMA_NO_NODE)
1885 		return policy->home_node;
1886 
1887 	return nd;
1888 }
1889 
1890 /* Do dynamic interleaving for a process */
1891 static unsigned interleave_nodes(struct mempolicy *policy)
1892 {
1893 	unsigned next;
1894 	struct task_struct *me = current;
1895 
1896 	next = next_node_in(me->il_prev, policy->nodes);
1897 	if (next < MAX_NUMNODES)
1898 		me->il_prev = next;
1899 	return next;
1900 }
1901 
1902 /*
1903  * Depending on the memory policy provide a node from which to allocate the
1904  * next slab entry.
1905  */
1906 unsigned int mempolicy_slab_node(void)
1907 {
1908 	struct mempolicy *policy;
1909 	int node = numa_mem_id();
1910 
1911 	if (!in_task())
1912 		return node;
1913 
1914 	policy = current->mempolicy;
1915 	if (!policy)
1916 		return node;
1917 
1918 	switch (policy->mode) {
1919 	case MPOL_PREFERRED:
1920 		return first_node(policy->nodes);
1921 
1922 	case MPOL_INTERLEAVE:
1923 		return interleave_nodes(policy);
1924 
1925 	case MPOL_BIND:
1926 	case MPOL_PREFERRED_MANY:
1927 	{
1928 		struct zoneref *z;
1929 
1930 		/*
1931 		 * Follow bind policy behavior and start allocation at the
1932 		 * first node.
1933 		 */
1934 		struct zonelist *zonelist;
1935 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1936 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1937 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1938 							&policy->nodes);
1939 		return z->zone ? zone_to_nid(z->zone) : node;
1940 	}
1941 	case MPOL_LOCAL:
1942 		return node;
1943 
1944 	default:
1945 		BUG();
1946 	}
1947 }
1948 
1949 /*
1950  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1951  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1952  * number of present nodes.
1953  */
1954 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1955 {
1956 	nodemask_t nodemask = pol->nodes;
1957 	unsigned int target, nnodes;
1958 	int i;
1959 	int nid;
1960 	/*
1961 	 * The barrier will stabilize the nodemask in a register or on
1962 	 * the stack so that it will stop changing under the code.
1963 	 *
1964 	 * Between first_node() and next_node(), pol->nodes could be changed
1965 	 * by other threads. So we put pol->nodes into a local copy on the stack.
1966 	 */
1967 	barrier();
1968 
1969 	nnodes = nodes_weight(nodemask);
1970 	if (!nnodes)
1971 		return numa_node_id();
1972 	target = (unsigned int)n % nnodes;
1973 	nid = first_node(nodemask);
1974 	for (i = 0; i < target; i++)
1975 		nid = next_node(nid, nodemask);
1976 	return nid;
1977 }
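
/*
 * Worked example (for illustration only): with pol->nodes = {1,3,5} and
 * n = 7, nnodes = 3 and target = 7 % 3 = 1; starting at first_node() = 1
 * and advancing once with next_node() yields node 3.
 */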
1978 
1979 /* Determine a node number for interleave */
1980 static inline unsigned interleave_nid(struct mempolicy *pol,
1981 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1982 {
1983 	if (vma) {
1984 		unsigned long off;
1985 
1986 		/*
1987 		 * for small pages, there is no difference between
1988 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1989 		 * for huge pages, since vm_pgoff is in units of small
1990 		 * pages, we need to shift off the always 0 bits to get
1991 		 * a useful offset.
1992 		 */
1993 		BUG_ON(shift < PAGE_SHIFT);
1994 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1995 		off += (addr - vma->vm_start) >> shift;
1996 		return offset_il_node(pol, off);
1997 	} else
1998 		return interleave_nodes(pol);
1999 }
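
/*
 * Worked example (for illustration only): for a 2MB huge page (shift = 21)
 * on a 4KB-page system (PAGE_SHIFT = 12), a VMA with vm_pgoff = 512 (2MB
 * into the backing object) and a fault 4MB past vm_start gives
 * off = (512 >> 9) + (4MB >> 21) = 1 + 2 = 3, which is then passed to
 * offset_il_node().
 */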
2000 
2001 #ifdef CONFIG_HUGETLBFS
2002 /*
2003  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2004  * @vma: virtual memory area whose policy is sought
2005  * @addr: address in @vma for shared policy lookup and interleave policy
2006  * @gfp_flags: for requested zone
2007  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2008  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2009  *
2010  * Returns a nid suitable for a huge page allocation and a pointer
2011  * to the struct mempolicy for conditional unref after allocation.
2012  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2013  * to the mempolicy's @nodemask for filtering the zonelist.
2014  *
2015  * Must be protected by read_mems_allowed_begin()
2016  */
2017 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2018 				struct mempolicy **mpol, nodemask_t **nodemask)
2019 {
2020 	int nid;
2021 	int mode;
2022 
2023 	*mpol = get_vma_policy(vma, addr);
2024 	*nodemask = NULL;
2025 	mode = (*mpol)->mode;
2026 
2027 	if (unlikely(mode == MPOL_INTERLEAVE)) {
2028 		nid = interleave_nid(*mpol, vma, addr,
2029 					huge_page_shift(hstate_vma(vma)));
2030 	} else {
2031 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2032 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2033 			*nodemask = &(*mpol)->nodes;
2034 	}
2035 	return nid;
2036 }
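
/*
 * Hypothetical caller sketch (not taken from this file): hugetlb allocation
 * paths typically use huge_node() along these lines, dropping the
 * conditional reference when done:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid;
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page from nid, filtered by nodemask ...
 *	mpol_cond_put(mpol);
 */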
2037 
2038 /*
2039  * init_nodemask_of_mempolicy
2040  *
2041  * If the current task's mempolicy is "default" [NULL], return 'false'
2042  * to indicate default policy.  Otherwise, extract the policy nodemask
2043  * for 'bind' or 'interleave' policy into the argument nodemask, or
2044  * initialize the argument nodemask to contain the single node for
2045  * 'preferred' or 'local' policy and return 'true' to indicate presence
2046  * of non-default mempolicy.
2047  *
2048  * We don't bother with reference counting the mempolicy [mpol_get/put]
2049  * because the current task is examining it's own mempolicy and a task's
2050  * mempolicy is only ever changed by the task itself.
2051  *
2052  * N.B., it is the caller's responsibility to free a returned nodemask.
2053  */
2054 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2055 {
2056 	struct mempolicy *mempolicy;
2057 
2058 	if (!(mask && current->mempolicy))
2059 		return false;
2060 
2061 	task_lock(current);
2062 	mempolicy = current->mempolicy;
2063 	switch (mempolicy->mode) {
2064 	case MPOL_PREFERRED:
2065 	case MPOL_PREFERRED_MANY:
2066 	case MPOL_BIND:
2067 	case MPOL_INTERLEAVE:
2068 		*mask = mempolicy->nodes;
2069 		break;
2070 
2071 	case MPOL_LOCAL:
2072 		init_nodemask_of_node(mask, numa_node_id());
2073 		break;
2074 
2075 	default:
2076 		BUG();
2077 	}
2078 	task_unlock(current);
2079 
2080 	return true;
2081 }
2082 #endif
2083 
2084 /*
2085  * mempolicy_in_oom_domain
2086  *
2087  * If tsk's mempolicy is "bind", check for intersection between mask and
2088  * the policy nodemask. Otherwise, return true for all other policies
2089  * including "interleave", as a tsk with "interleave" policy may have
2090  * memory allocated from all nodes in the system.
2091  *
2092  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2093  */
2094 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2095 					const nodemask_t *mask)
2096 {
2097 	struct mempolicy *mempolicy;
2098 	bool ret = true;
2099 
2100 	if (!mask)
2101 		return ret;
2102 
2103 	task_lock(tsk);
2104 	mempolicy = tsk->mempolicy;
2105 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2106 		ret = nodes_intersects(mempolicy->nodes, *mask);
2107 	task_unlock(tsk);
2108 
2109 	return ret;
2110 }
2111 
2112 /* Allocate a page under the interleave policy.
2113    Own path because it needs to do special accounting. */
2114 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2115 					unsigned nid)
2116 {
2117 	struct page *page;
2118 
2119 	page = __alloc_pages(gfp, order, nid, NULL);
2120 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2121 	if (!static_branch_likely(&vm_numa_stat_key))
2122 		return page;
2123 	if (page && page_to_nid(page) == nid) {
2124 		preempt_disable();
2125 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2126 		preempt_enable();
2127 	}
2128 	return page;
2129 }
2130 
2131 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2132 						int nid, struct mempolicy *pol)
2133 {
2134 	struct page *page;
2135 	gfp_t preferred_gfp;
2136 
2137 	/*
2138 	 * This is a two-pass approach. The first pass will only try the
2139 	 * preferred nodes but skip direct reclaim and allow the
2140 	 * allocation to fail, while the second pass will try all the
2141 	 * nodes in the system.
2142 	 */
2143 	preferred_gfp = gfp | __GFP_NOWARN;
2144 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2145 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2146 	if (!page)
2147 		page = __alloc_pages(gfp, order, nid, NULL);
2148 
2149 	return page;
2150 }
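
/*
 * For example (illustration only): with gfp = GFP_KERNEL the first pass uses
 * (GFP_KERNEL | __GFP_NOWARN) & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL), so
 * it cannot enter direct reclaim and may fail quickly; the second pass then
 * retries with the caller's original flags and no nodemask.
 */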
2151 
2152 /**
2153  * vma_alloc_folio - Allocate a folio for a VMA.
2154  * @gfp: GFP flags.
2155  * @order: Order of the folio.
2156  * @vma: Pointer to VMA or NULL if not available.
2157  * @addr: Virtual address of the allocation.  Must be inside @vma.
2158  * @hugepage: For hugepages try only the preferred node if possible.
2159  *
2160  * Allocate a folio for a specific address in @vma, using the appropriate
2161  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2162  * of the mm_struct of the VMA to prevent it from going away.  Should be
2163  * used for all allocations for folios that will be mapped into user space.
2164  *
2165  * Return: The folio on success or NULL if allocation fails.
2166  */
2167 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2168 		unsigned long addr, bool hugepage)
2169 {
2170 	struct mempolicy *pol;
2171 	int node = numa_node_id();
2172 	struct folio *folio;
2173 	int preferred_nid;
2174 	nodemask_t *nmask;
2175 
2176 	pol = get_vma_policy(vma, addr);
2177 
2178 	if (pol->mode == MPOL_INTERLEAVE) {
2179 		struct page *page;
2180 		unsigned nid;
2181 
2182 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2183 		mpol_cond_put(pol);
2184 		gfp |= __GFP_COMP;
2185 		page = alloc_page_interleave(gfp, order, nid);
2186 		if (page && order > 1)
2187 			prep_transhuge_page(page);
2188 		folio = (struct folio *)page;
2189 		goto out;
2190 	}
2191 
2192 	if (pol->mode == MPOL_PREFERRED_MANY) {
2193 		struct page *page;
2194 
2195 		node = policy_node(gfp, pol, node);
2196 		gfp |= __GFP_COMP;
2197 		page = alloc_pages_preferred_many(gfp, order, node, pol);
2198 		mpol_cond_put(pol);
2199 		if (page && order > 1)
2200 			prep_transhuge_page(page);
2201 		folio = (struct folio *)page;
2202 		goto out;
2203 	}
2204 
2205 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2206 		int hpage_node = node;
2207 
2208 		/*
2209 		 * For hugepage allocation and non-interleave policy which
2210 		 * allows the current node (or other explicitly preferred
2211 		 * node) we only try to allocate from the current/preferred
2212 		 * node and don't fall back to other nodes, as the cost of
2213 		 * remote accesses would likely offset THP benefits.
2214 		 *
2215 		 * If the policy is interleave or does not allow the current
2216 		 * node in its nodemask, we allocate the standard way.
2217 		 */
2218 		if (pol->mode == MPOL_PREFERRED)
2219 			hpage_node = first_node(pol->nodes);
2220 
2221 		nmask = policy_nodemask(gfp, pol);
2222 		if (!nmask || node_isset(hpage_node, *nmask)) {
2223 			mpol_cond_put(pol);
2224 			/*
2225 			 * First, try to allocate THP only on local node, but
2226 			 * don't reclaim unnecessarily, just compact.
2227 			 */
2228 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2229 					__GFP_NORETRY, order, hpage_node);
2230 
2231 			/*
2232 			 * If hugepage allocations are configured to always
2233 			 * use synchronous compaction or the vma has been madvised
2234 			 * to prefer hugepage backing, retry allowing remote
2235 			 * memory with both reclaim and compact as well.
2236 			 */
2237 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2238 				folio = __folio_alloc(gfp, order, hpage_node,
2239 						      nmask);
2240 
2241 			goto out;
2242 		}
2243 	}
2244 
2245 	nmask = policy_nodemask(gfp, pol);
2246 	preferred_nid = policy_node(gfp, pol, node);
2247 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2248 	mpol_cond_put(pol);
2249 out:
2250 	return folio;
2251 }
2252 EXPORT_SYMBOL(vma_alloc_folio);
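
/*
 * Hypothetical usage sketch (not taken from this file): a fault path might
 * allocate a single user folio for a faulting address like this:
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */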
2253 
2254 /**
2255  * alloc_pages - Allocate pages.
2256  * @gfp: GFP flags.
2257  * @order: Power of two of number of pages to allocate.
2258  *
2259  * Allocate 1 << @order contiguous pages.  The physical address of the
2260  * first page is naturally aligned (eg an order-3 allocation will be aligned
2261  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2262  * process is honoured when in process context.
2263  *
2264  * Context: Can be called from any context, providing the appropriate GFP
2265  * flags are used.
2266  * Return: The page on success or NULL if allocation fails.
2267  */
2268 struct page *alloc_pages(gfp_t gfp, unsigned order)
2269 {
2270 	struct mempolicy *pol = &default_policy;
2271 	struct page *page;
2272 
2273 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2274 		pol = get_task_policy(current);
2275 
2276 	/*
2277 	 * No reference counting needed for current->mempolicy
2278 	 * nor system default_policy
2279 	 */
2280 	if (pol->mode == MPOL_INTERLEAVE)
2281 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2282 	else if (pol->mode == MPOL_PREFERRED_MANY)
2283 		page = alloc_pages_preferred_many(gfp, order,
2284 				  policy_node(gfp, pol, numa_node_id()), pol);
2285 	else
2286 		page = __alloc_pages(gfp, order,
2287 				policy_node(gfp, pol, numa_node_id()),
2288 				policy_nodemask(gfp, pol));
2289 
2290 	return page;
2291 }
2292 EXPORT_SYMBOL(alloc_pages);
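
/*
 * Hypothetical usage sketch (not taken from this file): allocate four
 * contiguous pages under the current task's policy and free them again:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page)
 *		__free_pages(page, 2);
 */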
2293 
2294 struct folio *folio_alloc(gfp_t gfp, unsigned order)
2295 {
2296 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2297 
2298 	if (page && order > 1)
2299 		prep_transhuge_page(page);
2300 	return (struct folio *)page;
2301 }
2302 EXPORT_SYMBOL(folio_alloc);
2303 
2304 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2305 		struct mempolicy *pol, unsigned long nr_pages,
2306 		struct page **page_array)
2307 {
2308 	int nodes;
2309 	unsigned long nr_pages_per_node;
2310 	int delta;
2311 	int i;
2312 	unsigned long nr_allocated;
2313 	unsigned long total_allocated = 0;
2314 
2315 	nodes = nodes_weight(pol->nodes);
2316 	nr_pages_per_node = nr_pages / nodes;
2317 	delta = nr_pages - nodes * nr_pages_per_node;
2318 
2319 	for (i = 0; i < nodes; i++) {
2320 		if (delta) {
2321 			nr_allocated = __alloc_pages_bulk(gfp,
2322 					interleave_nodes(pol), NULL,
2323 					nr_pages_per_node + 1, NULL,
2324 					page_array);
2325 			delta--;
2326 		} else {
2327 			nr_allocated = __alloc_pages_bulk(gfp,
2328 					interleave_nodes(pol), NULL,
2329 					nr_pages_per_node, NULL, page_array);
2330 		}
2331 
2332 		page_array += nr_allocated;
2333 		total_allocated += nr_allocated;
2334 	}
2335 
2336 	return total_allocated;
2337 }
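
/*
 * Worked example (for illustration only): for nr_pages = 10 spread over 3
 * interleave nodes, nr_pages_per_node = 3 and delta = 1, so the first node
 * visited receives 4 pages and the remaining two receive 3 pages each.
 */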
2338 
2339 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2340 		struct mempolicy *pol, unsigned long nr_pages,
2341 		struct page **page_array)
2342 {
2343 	gfp_t preferred_gfp;
2344 	unsigned long nr_allocated = 0;
2345 
2346 	preferred_gfp = gfp | __GFP_NOWARN;
2347 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2348 
2349 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2350 					   nr_pages, NULL, page_array);
2351 
2352 	if (nr_allocated < nr_pages)
2353 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2354 				nr_pages - nr_allocated, NULL,
2355 				page_array + nr_allocated);
2356 	return nr_allocated;
2357 }
2358 
2359 /* Bulk page allocation and the mempolicy should be considered at the
2360  * same time in some situations, such as vmalloc.
2361  *
2362  * This can accelerate memory allocation, especially for interleaved
2363  * allocations.
2364  */
2365 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2366 		unsigned long nr_pages, struct page **page_array)
2367 {
2368 	struct mempolicy *pol = &default_policy;
2369 
2370 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2371 		pol = get_task_policy(current);
2372 
2373 	if (pol->mode == MPOL_INTERLEAVE)
2374 		return alloc_pages_bulk_array_interleave(gfp, pol,
2375 							 nr_pages, page_array);
2376 
2377 	if (pol->mode == MPOL_PREFERRED_MANY)
2378 		return alloc_pages_bulk_array_preferred_many(gfp,
2379 				numa_node_id(), pol, nr_pages, page_array);
2380 
2381 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2382 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2383 				  page_array);
2384 }
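
/*
 * Hypothetical usage sketch (not taken from this file): a vmalloc-like
 * caller can fill an array of page pointers in one call while honouring the
 * current task's mempolicy:
 *
 *	nr = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr_pages, pages);
 *	if (nr < nr_pages)
 *		... allocate the remainder one page at a time ...
 */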
2385 
2386 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2387 {
2388 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2389 
2390 	if (IS_ERR(pol))
2391 		return PTR_ERR(pol);
2392 	dst->vm_policy = pol;
2393 	return 0;
2394 }
2395 
2396 /*
2397  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2398  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2399  * with the mems_allowed returned by cpuset_mems_allowed().  This
2400  * keeps mempolicies cpuset relative after its cpuset moves.  See
2401  * further kernel/cpuset.c update_nodemask().
2402  *
2403  * current's mempolicy may be rebound by another task (the task that changes
2404  * the cpuset's mems), so we needn't do the rebind work for the current task.
2405  */
2406 
2407 /* Slow path of a mempolicy duplicate */
2408 struct mempolicy *__mpol_dup(struct mempolicy *old)
2409 {
2410 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2411 
2412 	if (!new)
2413 		return ERR_PTR(-ENOMEM);
2414 
2415 	/* task's mempolicy is protected by alloc_lock */
2416 	if (old == current->mempolicy) {
2417 		task_lock(current);
2418 		*new = *old;
2419 		task_unlock(current);
2420 	} else
2421 		*new = *old;
2422 
2423 	if (current_cpuset_is_being_rebound()) {
2424 		nodemask_t mems = cpuset_mems_allowed(current);
2425 		mpol_rebind_policy(new, &mems);
2426 	}
2427 	atomic_set(&new->refcnt, 1);
2428 	return new;
2429 }
2430 
2431 /* Slow path of a mempolicy comparison */
2432 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2433 {
2434 	if (!a || !b)
2435 		return false;
2436 	if (a->mode != b->mode)
2437 		return false;
2438 	if (a->flags != b->flags)
2439 		return false;
2440 	if (a->home_node != b->home_node)
2441 		return false;
2442 	if (mpol_store_user_nodemask(a))
2443 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2444 			return false;
2445 
2446 	switch (a->mode) {
2447 	case MPOL_BIND:
2448 	case MPOL_INTERLEAVE:
2449 	case MPOL_PREFERRED:
2450 	case MPOL_PREFERRED_MANY:
2451 		return !!nodes_equal(a->nodes, b->nodes);
2452 	case MPOL_LOCAL:
2453 		return true;
2454 	default:
2455 		BUG();
2456 		return false;
2457 	}
2458 }
2459 
2460 /*
2461  * Shared memory backing store policy support.
2462  *
2463  * Remember policies even when nobody has shared memory mapped.
2464  * The policies are kept in Red-Black tree linked from the inode.
2465  * They are protected by the sp->lock rwlock, which should be held
2466  * for any accesses to the tree.
2467  */
2468 
2469 /*
2470  * Look up the first element intersecting start-end.  Caller holds sp->lock
2471  * for reading or for writing.
2472  */
2473 static struct sp_node *
2474 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2475 {
2476 	struct rb_node *n = sp->root.rb_node;
2477 
2478 	while (n) {
2479 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2480 
2481 		if (start >= p->end)
2482 			n = n->rb_right;
2483 		else if (end <= p->start)
2484 			n = n->rb_left;
2485 		else
2486 			break;
2487 	}
2488 	if (!n)
2489 		return NULL;
2490 	for (;;) {
2491 		struct sp_node *w = NULL;
2492 		struct rb_node *prev = rb_prev(n);
2493 		if (!prev)
2494 			break;
2495 		w = rb_entry(prev, struct sp_node, nd);
2496 		if (w->end <= start)
2497 			break;
2498 		n = prev;
2499 	}
2500 	return rb_entry(n, struct sp_node, nd);
2501 }
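
/*
 * Worked example (for illustration only): if the tree holds the ranges
 * [0,4) and [4,8), sp_lookup(sp, 2, 6) first finds an intersecting node and
 * then walks back via rb_prev() until w->end <= start, returning the [0,4)
 * node, i.e. the lowest range that intersects [2,6).
 */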
2502 
2503 /*
2504  * Insert a new shared policy into the list.  Caller holds sp->lock for
2505  * writing.
2506  */
2507 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2508 {
2509 	struct rb_node **p = &sp->root.rb_node;
2510 	struct rb_node *parent = NULL;
2511 	struct sp_node *nd;
2512 
2513 	while (*p) {
2514 		parent = *p;
2515 		nd = rb_entry(parent, struct sp_node, nd);
2516 		if (new->start < nd->start)
2517 			p = &(*p)->rb_left;
2518 		else if (new->end > nd->end)
2519 			p = &(*p)->rb_right;
2520 		else
2521 			BUG();
2522 	}
2523 	rb_link_node(&new->nd, parent, p);
2524 	rb_insert_color(&new->nd, &sp->root);
2525 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2526 		 new->policy ? new->policy->mode : 0);
2527 }
2528 
2529 /* Find shared policy intersecting idx */
2530 struct mempolicy *
2531 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2532 {
2533 	struct mempolicy *pol = NULL;
2534 	struct sp_node *sn;
2535 
2536 	if (!sp->root.rb_node)
2537 		return NULL;
2538 	read_lock(&sp->lock);
2539 	sn = sp_lookup(sp, idx, idx+1);
2540 	if (sn) {
2541 		mpol_get(sn->policy);
2542 		pol = sn->policy;
2543 	}
2544 	read_unlock(&sp->lock);
2545 	return pol;
2546 }
2547 
2548 static void sp_free(struct sp_node *n)
2549 {
2550 	mpol_put(n->policy);
2551 	kmem_cache_free(sn_cache, n);
2552 }
2553 
2554 /**
2555  * mpol_misplaced - check whether current page node is valid in policy
2556  *
2557  * @page: page to be checked
2558  * @vma: vm area where page mapped
2559  * @addr: virtual address where page mapped
2560  *
2561  * Lookup current policy node id for vma,addr and "compare to" page's
2562  * node id.  Policy determination "mimics" alloc_page_vma().
2563  * Called from fault path where we know the vma and faulting address.
2564  *
2565  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2566  * policy, or a suitable node ID to allocate a replacement page from.
2567  */
2568 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2569 {
2570 	struct mempolicy *pol;
2571 	struct zoneref *z;
2572 	int curnid = page_to_nid(page);
2573 	unsigned long pgoff;
2574 	int thiscpu = raw_smp_processor_id();
2575 	int thisnid = cpu_to_node(thiscpu);
2576 	int polnid = NUMA_NO_NODE;
2577 	int ret = NUMA_NO_NODE;
2578 
2579 	pol = get_vma_policy(vma, addr);
2580 	if (!(pol->flags & MPOL_F_MOF))
2581 		goto out;
2582 
2583 	switch (pol->mode) {
2584 	case MPOL_INTERLEAVE:
2585 		pgoff = vma->vm_pgoff;
2586 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2587 		polnid = offset_il_node(pol, pgoff);
2588 		break;
2589 
2590 	case MPOL_PREFERRED:
2591 		if (node_isset(curnid, pol->nodes))
2592 			goto out;
2593 		polnid = first_node(pol->nodes);
2594 		break;
2595 
2596 	case MPOL_LOCAL:
2597 		polnid = numa_node_id();
2598 		break;
2599 
2600 	case MPOL_BIND:
2601 		/* Optimize placement among multiple nodes via NUMA balancing */
2602 		if (pol->flags & MPOL_F_MORON) {
2603 			if (node_isset(thisnid, pol->nodes))
2604 				break;
2605 			goto out;
2606 		}
2607 		fallthrough;
2608 
2609 	case MPOL_PREFERRED_MANY:
2610 		/*
2611 		 * use current page if in policy nodemask,
2612 		 * else select nearest allowed node, if any.
2613 		 * If no allowed nodes, use current [!misplaced].
2614 		 */
2615 		if (node_isset(curnid, pol->nodes))
2616 			goto out;
2617 		z = first_zones_zonelist(
2618 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2619 				gfp_zone(GFP_HIGHUSER),
2620 				&pol->nodes);
2621 		polnid = zone_to_nid(z->zone);
2622 		break;
2623 
2624 	default:
2625 		BUG();
2626 	}
2627 
2628 	/* Migrate the page towards the node whose CPU is referencing it */
2629 	if (pol->flags & MPOL_F_MORON) {
2630 		polnid = thisnid;
2631 
2632 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2633 			goto out;
2634 	}
2635 
2636 	if (curnid != polnid)
2637 		ret = polnid;
2638 out:
2639 	mpol_cond_put(pol);
2640 
2641 	return ret;
2642 }
2643 
2644 /*
2645  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2646  * dropped after task->mempolicy is set to NULL so that any allocation done as
2647  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2648  * policy.
2649  */
2650 void mpol_put_task_policy(struct task_struct *task)
2651 {
2652 	struct mempolicy *pol;
2653 
2654 	task_lock(task);
2655 	pol = task->mempolicy;
2656 	task->mempolicy = NULL;
2657 	task_unlock(task);
2658 	mpol_put(pol);
2659 }
2660 
2661 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2662 {
2663 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2664 	rb_erase(&n->nd, &sp->root);
2665 	sp_free(n);
2666 }
2667 
2668 static void sp_node_init(struct sp_node *node, unsigned long start,
2669 			unsigned long end, struct mempolicy *pol)
2670 {
2671 	node->start = start;
2672 	node->end = end;
2673 	node->policy = pol;
2674 }
2675 
2676 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2677 				struct mempolicy *pol)
2678 {
2679 	struct sp_node *n;
2680 	struct mempolicy *newpol;
2681 
2682 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2683 	if (!n)
2684 		return NULL;
2685 
2686 	newpol = mpol_dup(pol);
2687 	if (IS_ERR(newpol)) {
2688 		kmem_cache_free(sn_cache, n);
2689 		return NULL;
2690 	}
2691 	newpol->flags |= MPOL_F_SHARED;
2692 	sp_node_init(n, start, end, newpol);
2693 
2694 	return n;
2695 }
2696 
2697 /* Replace a policy range. */
2698 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2699 				 unsigned long end, struct sp_node *new)
2700 {
2701 	struct sp_node *n;
2702 	struct sp_node *n_new = NULL;
2703 	struct mempolicy *mpol_new = NULL;
2704 	int ret = 0;
2705 
2706 restart:
2707 	write_lock(&sp->lock);
2708 	n = sp_lookup(sp, start, end);
2709 	/* Take care of old policies in the same range. */
2710 	while (n && n->start < end) {
2711 		struct rb_node *next = rb_next(&n->nd);
2712 		if (n->start >= start) {
2713 			if (n->end <= end)
2714 				sp_delete(sp, n);
2715 			else
2716 				n->start = end;
2717 		} else {
2718 			/* Old policy spanning whole new range. */
2719 			if (n->end > end) {
2720 				if (!n_new)
2721 					goto alloc_new;
2722 
2723 				*mpol_new = *n->policy;
2724 				atomic_set(&mpol_new->refcnt, 1);
2725 				sp_node_init(n_new, end, n->end, mpol_new);
2726 				n->end = start;
2727 				sp_insert(sp, n_new);
2728 				n_new = NULL;
2729 				mpol_new = NULL;
2730 				break;
2731 			} else
2732 				n->end = start;
2733 		}
2734 		if (!next)
2735 			break;
2736 		n = rb_entry(next, struct sp_node, nd);
2737 	}
2738 	if (new)
2739 		sp_insert(sp, new);
2740 	write_unlock(&sp->lock);
2741 	ret = 0;
2742 
2743 err_out:
2744 	if (mpol_new)
2745 		mpol_put(mpol_new);
2746 	if (n_new)
2747 		kmem_cache_free(sn_cache, n_new);
2748 
2749 	return ret;
2750 
2751 alloc_new:
2752 	write_unlock(&sp->lock);
2753 	ret = -ENOMEM;
2754 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2755 	if (!n_new)
2756 		goto err_out;
2757 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2758 	if (!mpol_new)
2759 		goto err_out;
2760 	atomic_set(&mpol_new->refcnt, 1);
2761 	goto restart;
2762 }
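
/*
 * Worked example (for illustration only): replacing [3,7) when an existing
 * node covers [0,10) trims the old node to [0,3) and inserts a new node
 * [7,10) carrying a copy of the old policy, after which the new [3,7) node
 * is inserted; the n_new/mpol_new pair is preallocated outside the lock for
 * exactly this case.
 */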
2763 
2764 /**
2765  * mpol_shared_policy_init - initialize shared policy for inode
2766  * @sp: pointer to inode shared policy
2767  * @mpol:  struct mempolicy to install
2768  *
2769  * Install non-NULL @mpol in inode's shared policy rb-tree.
2770  * On entry, the current task has a reference on a non-NULL @mpol.
2771  * This must be released on exit.
2772  * This is called during get_inode() calls, so we can use GFP_KERNEL.
2773  */
2774 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2775 {
2776 	int ret;
2777 
2778 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2779 	rwlock_init(&sp->lock);
2780 
2781 	if (mpol) {
2782 		struct vm_area_struct pvma;
2783 		struct mempolicy *new;
2784 		NODEMASK_SCRATCH(scratch);
2785 
2786 		if (!scratch)
2787 			goto put_mpol;
2788 		/* contextualize the tmpfs mount point mempolicy */
2789 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2790 		if (IS_ERR(new))
2791 			goto free_scratch; /* no valid nodemask intersection */
2792 
2793 		task_lock(current);
2794 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2795 		task_unlock(current);
2796 		if (ret)
2797 			goto put_new;
2798 
2799 		/* Create pseudo-vma that contains just the policy */
2800 		vma_init(&pvma, NULL);
2801 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2802 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2803 
2804 put_new:
2805 		mpol_put(new);			/* drop initial ref */
2806 free_scratch:
2807 		NODEMASK_SCRATCH_FREE(scratch);
2808 put_mpol:
2809 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2810 	}
2811 }
2812 
2813 int mpol_set_shared_policy(struct shared_policy *info,
2814 			struct vm_area_struct *vma, struct mempolicy *npol)
2815 {
2816 	int err;
2817 	struct sp_node *new = NULL;
2818 	unsigned long sz = vma_pages(vma);
2819 
2820 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2821 		 vma->vm_pgoff,
2822 		 sz, npol ? npol->mode : -1,
2823 		 npol ? npol->flags : -1,
2824 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2825 
2826 	if (npol) {
2827 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2828 		if (!new)
2829 			return -ENOMEM;
2830 	}
2831 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2832 	if (err && new)
2833 		sp_free(new);
2834 	return err;
2835 }
2836 
2837 /* Free a backing policy store on inode deletion. */
2838 void mpol_free_shared_policy(struct shared_policy *p)
2839 {
2840 	struct sp_node *n;
2841 	struct rb_node *next;
2842 
2843 	if (!p->root.rb_node)
2844 		return;
2845 	write_lock(&p->lock);
2846 	next = rb_first(&p->root);
2847 	while (next) {
2848 		n = rb_entry(next, struct sp_node, nd);
2849 		next = rb_next(&n->nd);
2850 		sp_delete(p, n);
2851 	}
2852 	write_unlock(&p->lock);
2853 }
2854 
2855 #ifdef CONFIG_NUMA_BALANCING
2856 static int __initdata numabalancing_override;
2857 
2858 static void __init check_numabalancing_enable(void)
2859 {
2860 	bool numabalancing_default = false;
2861 
2862 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2863 		numabalancing_default = true;
2864 
2865 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2866 	if (numabalancing_override)
2867 		set_numabalancing_state(numabalancing_override == 1);
2868 
2869 	if (num_online_nodes() > 1 && !numabalancing_override) {
2870 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2871 			numabalancing_default ? "Enabling" : "Disabling");
2872 		set_numabalancing_state(numabalancing_default);
2873 	}
2874 }
2875 
2876 static int __init setup_numabalancing(char *str)
2877 {
2878 	int ret = 0;
2879 	if (!str)
2880 		goto out;
2881 
2882 	if (!strcmp(str, "enable")) {
2883 		numabalancing_override = 1;
2884 		ret = 1;
2885 	} else if (!strcmp(str, "disable")) {
2886 		numabalancing_override = -1;
2887 		ret = 1;
2888 	}
2889 out:
2890 	if (!ret)
2891 		pr_warn("Unable to parse numa_balancing=\n");
2892 
2893 	return ret;
2894 }
2895 __setup("numa_balancing=", setup_numabalancing);
2896 #else
2897 static inline void __init check_numabalancing_enable(void)
2898 {
2899 }
2900 #endif /* CONFIG_NUMA_BALANCING */
2901 
2902 /* assumes fs == KERNEL_DS */
2903 void __init numa_policy_init(void)
2904 {
2905 	nodemask_t interleave_nodes;
2906 	unsigned long largest = 0;
2907 	int nid, prefer = 0;
2908 
2909 	policy_cache = kmem_cache_create("numa_policy",
2910 					 sizeof(struct mempolicy),
2911 					 0, SLAB_PANIC, NULL);
2912 
2913 	sn_cache = kmem_cache_create("shared_policy_node",
2914 				     sizeof(struct sp_node),
2915 				     0, SLAB_PANIC, NULL);
2916 
2917 	for_each_node(nid) {
2918 		preferred_node_policy[nid] = (struct mempolicy) {
2919 			.refcnt = ATOMIC_INIT(1),
2920 			.mode = MPOL_PREFERRED,
2921 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2922 			.nodes = nodemask_of_node(nid),
2923 		};
2924 	}
2925 
2926 	/*
2927 	 * Set interleaving policy for system init. Interleaving is only
2928 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2929 	 * back to the largest node if they're all smaller.
2930 	 */
2931 	nodes_clear(interleave_nodes);
2932 	for_each_node_state(nid, N_MEMORY) {
2933 		unsigned long total_pages = node_present_pages(nid);
2934 
2935 		/* Preserve the largest node */
2936 		if (largest < total_pages) {
2937 			largest = total_pages;
2938 			prefer = nid;
2939 		}
2940 
2941 		/* Interleave this node? */
2942 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2943 			node_set(nid, interleave_nodes);
2944 	}
2945 
2946 	/* All too small, use the largest */
2947 	if (unlikely(nodes_empty(interleave_nodes)))
2948 		node_set(prefer, interleave_nodes);
2949 
2950 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2951 		pr_err("%s: interleaving failed\n", __func__);
2952 
2953 	check_numabalancing_enable();
2954 }
2955 
2956 /* Reset policy of current process to default */
2957 void numa_default_policy(void)
2958 {
2959 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2960 }
2961 
2962 /*
2963  * Parse and format mempolicy from/to strings
2964  */
2965 
2966 static const char * const policy_modes[] =
2967 {
2968 	[MPOL_DEFAULT]    = "default",
2969 	[MPOL_PREFERRED]  = "prefer",
2970 	[MPOL_BIND]       = "bind",
2971 	[MPOL_INTERLEAVE] = "interleave",
2972 	[MPOL_LOCAL]      = "local",
2973 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2974 };
2975 
2976 
2977 #ifdef CONFIG_TMPFS
2978 /**
2979  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2980  * @str:  string containing mempolicy to parse
2981  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2982  *
2983  * Format of input:
2984  *	<mode>[=<flags>][:<nodelist>]
2985  *
2986  * Return: %0 on success, else %1
2987  */
2988 int mpol_parse_str(char *str, struct mempolicy **mpol)
2989 {
2990 	struct mempolicy *new = NULL;
2991 	unsigned short mode_flags;
2992 	nodemask_t nodes;
2993 	char *nodelist = strchr(str, ':');
2994 	char *flags = strchr(str, '=');
2995 	int err = 1, mode;
2996 
2997 	if (flags)
2998 		*flags++ = '\0';	/* terminate mode string */
2999 
3000 	if (nodelist) {
3001 		/* NUL-terminate mode or flags string */
3002 		*nodelist++ = '\0';
3003 		if (nodelist_parse(nodelist, nodes))
3004 			goto out;
3005 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3006 			goto out;
3007 	} else
3008 		nodes_clear(nodes);
3009 
3010 	mode = match_string(policy_modes, MPOL_MAX, str);
3011 	if (mode < 0)
3012 		goto out;
3013 
3014 	switch (mode) {
3015 	case MPOL_PREFERRED:
3016 		/*
3017 		 * Insist on a nodelist of one node only; later we use
3018 		 * first_node(nodes) to grab that single node, so here
3019 		 * the nodelist (or nodes) cannot be empty.
3020 		 */
3021 		if (nodelist) {
3022 			char *rest = nodelist;
3023 			while (isdigit(*rest))
3024 				rest++;
3025 			if (*rest)
3026 				goto out;
3027 			if (nodes_empty(nodes))
3028 				goto out;
3029 		}
3030 		break;
3031 	case MPOL_INTERLEAVE:
3032 		/*
3033 		 * Default to online nodes with memory if no nodelist
3034 		 */
3035 		if (!nodelist)
3036 			nodes = node_states[N_MEMORY];
3037 		break;
3038 	case MPOL_LOCAL:
3039 		/*
3040 		 * Don't allow a nodelist;  mpol_new() checks flags
3041 		 */
3042 		if (nodelist)
3043 			goto out;
3044 		break;
3045 	case MPOL_DEFAULT:
3046 		/*
3047 		 * Insist on an empty nodelist
3048 		 */
3049 		if (!nodelist)
3050 			err = 0;
3051 		goto out;
3052 	case MPOL_PREFERRED_MANY:
3053 	case MPOL_BIND:
3054 		/*
3055 		 * Insist on a nodelist
3056 		 */
3057 		if (!nodelist)
3058 			goto out;
3059 	}
3060 
3061 	mode_flags = 0;
3062 	if (flags) {
3063 		/*
3064 		 * Currently, we only support two mutually exclusive
3065 		 * mode flags.
3066 		 */
3067 		if (!strcmp(flags, "static"))
3068 			mode_flags |= MPOL_F_STATIC_NODES;
3069 		else if (!strcmp(flags, "relative"))
3070 			mode_flags |= MPOL_F_RELATIVE_NODES;
3071 		else
3072 			goto out;
3073 	}
3074 
3075 	new = mpol_new(mode, mode_flags, &nodes);
3076 	if (IS_ERR(new))
3077 		goto out;
3078 
3079 	/*
3080 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3081 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3082 	 */
3083 	if (mode != MPOL_PREFERRED) {
3084 		new->nodes = nodes;
3085 	} else if (nodelist) {
3086 		nodes_clear(new->nodes);
3087 		node_set(first_node(nodes), new->nodes);
3088 	} else {
3089 		new->mode = MPOL_LOCAL;
3090 	}
3091 
3092 	/*
3093 	 * Save nodes for contextualization: this will be used to "clone"
3094 	 * the mempolicy in a specific context [cpuset] at a later time.
3095 	 */
3096 	new->w.user_nodemask = nodes;
3097 
3098 	err = 0;
3099 
3100 out:
3101 	/* Restore string for error message */
3102 	if (nodelist)
3103 		*--nodelist = ':';
3104 	if (flags)
3105 		*--flags = '=';
3106 	if (!err)
3107 		*mpol = new;
3108 	return err;
3109 }
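
/*
 * Example (illustration only, assuming nodes 0-3 are online with memory):
 * parsing the tmpfs mount option string
 *
 *	"interleave=static:0-3"
 *
 * yields a mempolicy with mode MPOL_INTERLEAVE, the MPOL_F_STATIC_NODES
 * flag and nodes 0-3.
 */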
3110 #endif /* CONFIG_TMPFS */
3111 
3112 /**
3113  * mpol_to_str - format a mempolicy structure for printing
3114  * @buffer:  to contain formatted mempolicy string
3115  * @maxlen:  length of @buffer
3116  * @pol:  pointer to mempolicy to be formatted
3117  *
3118  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3119  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3120  * longest flag, "relative", and to display at least a few node ids.
3121  */
3122 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3123 {
3124 	char *p = buffer;
3125 	nodemask_t nodes = NODE_MASK_NONE;
3126 	unsigned short mode = MPOL_DEFAULT;
3127 	unsigned short flags = 0;
3128 
3129 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3130 		mode = pol->mode;
3131 		flags = pol->flags;
3132 	}
3133 
3134 	switch (mode) {
3135 	case MPOL_DEFAULT:
3136 	case MPOL_LOCAL:
3137 		break;
3138 	case MPOL_PREFERRED:
3139 	case MPOL_PREFERRED_MANY:
3140 	case MPOL_BIND:
3141 	case MPOL_INTERLEAVE:
3142 		nodes = pol->nodes;
3143 		break;
3144 	default:
3145 		WARN_ON_ONCE(1);
3146 		snprintf(p, maxlen, "unknown");
3147 		return;
3148 	}
3149 
3150 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3151 
3152 	if (flags & MPOL_MODE_FLAGS) {
3153 		p += snprintf(p, buffer + maxlen - p, "=");
3154 
3155 		/*
3156 		 * Currently, the only defined flags are mutually exclusive
3157 		 */
3158 		if (flags & MPOL_F_STATIC_NODES)
3159 			p += snprintf(p, buffer + maxlen - p, "static");
3160 		else if (flags & MPOL_F_RELATIVE_NODES)
3161 			p += snprintf(p, buffer + maxlen - p, "relative");
3162 	}
3163 
3164 	if (!nodes_empty(nodes))
3165 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3166 			       nodemask_pr_args(&nodes));
3167 }
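
/*
 * Example (illustration only): for an interleave policy over nodes 0-3 with
 * MPOL_F_STATIC_NODES set, the formatted string is
 *
 *	"interleave=static:0-3"
 *
 * mirroring the format accepted by mpol_parse_str() above.
 */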
3168