// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

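/*
 * Folios being mlocked or munlocked are gathered on a per-CPU folio
 * batch, with the intended operation encoded in the low bits of each
 * folio pointer (see LRU_FOLIO and NEW_FOLIO below), and are then
 * processed in bulk by mlock_folio_batch().
 */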
struct mlock_fbatch {
	local_lock_t lock;
	struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

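/*
 * A task may lock memory if its RLIMIT_MEMLOCK limit is nonzero, or if
 * it is privileged with CAP_IPC_LOCK.
 */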
bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists. PG_unevictable is set to
 * indicate the unevictable state.
 */

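/*
 * __mlock_folio() moves a folio that is on (or has been temporarily
 * isolated from) the LRU to the unevictable list, maintaining its
 * mlock_count while it is there.  The locked lruvec is returned so that
 * the caller can reuse it for the next folio in the batch.
 */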
static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (unlikely(folio_evictable(folio))) {
		/*
		 * This is a little surprising, but quite possible: PG_mlocked
		 * must have got cleared already by another CPU.  Could this
		 * folio be unevictable?  I'm not sure, but move it now if so.
		 */
		if (folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);

			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  folio_nr_pages(folio));
		}
		goto out;
	}

	if (folio_test_unevictable(folio)) {
		if (folio_test_mlocked(folio))
			folio->mlock_count++;
		goto out;
	}

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	lruvec_add_folio(lruvec, folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	folio_set_lru(folio);
	return lruvec;
}

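/*
 * __mlock_new_folio() does the same for a newly allocated folio that
 * has never been on the LRU, so there is no old list to remove it from.
 */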
static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(folio_evictable(folio)))
		goto out;

	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	lruvec_add_folio(lruvec, folio);
	folio_set_lru(folio);
	return lruvec;
}

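/*
 * __munlock_folio() decrements the folio's mlock_count and, once it
 * drops to zero, clears PG_mlocked and moves the folio back toward the
 * evictable lists where that can be done safely here; otherwise reclaim
 * is left to fix things up later.
 */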
static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	int nr_pages = folio_nr_pages(folio);
	bool isolated = false;

	if (!folio_test_clear_lru(folio))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (folio_test_unevictable(folio)) {
		/* Then mlock_count is maintained, but might undercount */
		if (folio->mlock_count)
			folio->mlock_count--;
		if (folio->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (folio_test_clear_mlocked(folio)) {
		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		if (isolated || !folio_test_unevictable(folio))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* folio_evictable() has to be checked *after* clearing PG_mlocked */
	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		folio_set_lru(folio);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
static inline struct folio *mlock_lru(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}

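/*
 * A sketch of the tag/untag round trip done by mlock_folio_batch()
 * below (illustrative only; it relies on struct folio pointers being at
 * least 4-byte aligned, which leaves the two low bits free):
 *
 *	folio = mlock_lru(folio);			// folio + LRU_FOLIO
 *	mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
 *	folio = (struct folio *)((unsigned long)folio - mlock);
 */
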
/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct folio *folio;
	int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		folio = fbatch->folios[i];
		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
		folio = (struct folio *)((unsigned long)folio - mlock);
		fbatch->folios[i] = folio;

		if (mlock & LRU_FOLIO)
			lruvec = __mlock_folio(folio, lruvec);
		else if (mlock & NEW_FOLIO)
			lruvec = __mlock_new_folio(folio, lruvec);
		else
			lruvec = __munlock_folio(folio, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_reinit(fbatch);
}

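/*
 * mlock_drain_local() flushes this CPU's pending mlock folio batch;
 * mlock_drain_remote() flushes the batch of a CPU that has gone
 * offline and so can no longer drain it itself.
 */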
void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

	WARN_ON_ONCE(cpu_online(cpu));
	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
}

bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	folio_set_mlocked(folio);

	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/*
	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
	 * which will check whether the folio is multiply mlocked.
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

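/*
 * mlock_pte_range() is the pagewalk callback: a PMD-mapped THP is
 * handled as a single folio; otherwise each present PTE is visited and
 * its folio mlocked or munlocked according to VM_LOCKED.  Large folios
 * mapped by PTEs are skipped here.
 */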
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	pte_t ptent;
	struct folio *folio;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;
		if (folio_test_large(folio))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 *                           or munlock all pages in the range.
 * @vma: vma containing range to be mlock()ed or munlock()ed
 * @start: start address in @vma of the range
 * @end: end of range in @vma
 * @newflags: the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
		.walk_lock = PGWALK_WRLOCK_VERIFY,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_folio() and raise the page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_folio() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	vma_start_write(vma);
	vm_flags_reset_once(vma, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       struct vm_area_struct **prev, unsigned long start,
	       unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(vmi, mm, *prev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(vmi, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(vmi, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of the amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one() unmaps a page just after we
	 * set VM_LOCKED; populate_vma_page_range() will bring it back.
	 */
	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma_start_write(vma);
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}

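/*
 * apply_vma_lock_flags() walks the VMAs covering [start, start + len)
 * and applies @flags to each via mlock_fixup(), returning -ENOMEM if
 * the range is not fully covered by VMAs.
 */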
static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = vma_iter_load(&vmi);
	if (!vma)
		return -ENOMEM;

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		int error;
		vm_flags_t newflags;

		if (vma->vm_start != tmp)
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= flags;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		tmp = vma_iter_end(&vmi);
		nstart = tmp;
	}

	if (tmp < end)
		return -ENOMEM;

	return 0;
}

/*
 * Go through the VMAs in the given range and sum the size of the mlocked
 * pages.  Note that the deferred memory locking case (mlock2() with
 * MLOCK_ONFAULT) is also counted.
 *
 * Return value: number of previously mlocked pages.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

/*
 * Convert a get_user_pages() return value to the corresponding POSIX
 * mlock() error.
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the requested region intersects with
		 * previously mlocked areas; that part is already accounted
		 * in "mm->locked_vm" and should not be counted again toward
		 * the new mlock increment.  So check and adjust the locked
		 * count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

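/*
 * Userspace usage sketch (illustrative only, assuming <sys/mman.h>):
 *
 *	if (mlock(addr, len))			// lock and fault in now
 *		perror("mlock");
 *	if (mlock2(addr, len, MLOCK_ONFAULT))	// lock, fault in lazily
 *		perror("mlock2");
 */
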
SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	VMA_ITERATOR(vmi, current->mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= ~VM_LOCKED_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for_each_vma(vmi, vma) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
			    newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

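/*
 * Userspace usage sketch (illustrative only):
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);	// lock all current and future mappings
 *	...
 *	munlockall();				// undo both
 */
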
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetimes than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user's ucounts
 * (UCOUNT_RLIMIT_MEMLOCK) instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

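/*
 * user_shm_lock() returns 1 if @size bytes could be charged to @ucounts'
 * UCOUNT_RLIMIT_MEMLOCK and a reference on @ucounts was taken; 0
 * otherwise.  A successful charge must be undone later with
 * user_shm_unlock().
 */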
int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}