xref: /openbmc/linux/mm/mlock.c (revision 11976fe2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	linux/mm/mlock.c
4  *
5  *  (C) Copyright 1995 Linus Torvalds
6  *  (C) Copyright 2002 Christoph Hellwig
7  */
8 
9 #include <linux/capability.h>
10 #include <linux/mman.h>
11 #include <linux/mm.h>
12 #include <linux/sched/user.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/pagemap.h>
16 #include <linux/pagevec.h>
17 #include <linux/pagewalk.h>
18 #include <linux/mempolicy.h>
19 #include <linux/syscalls.h>
20 #include <linux/sched.h>
21 #include <linux/export.h>
22 #include <linux/rmap.h>
23 #include <linux/mmzone.h>
24 #include <linux/hugetlb.h>
25 #include <linux/memcontrol.h>
26 #include <linux/mm_inline.h>
27 #include <linux/secretmem.h>
28 
29 #include "internal.h"
30 
31 struct mlock_fbatch {
32 	local_lock_t lock;
33 	struct folio_batch fbatch;
34 };
35 
36 static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
37 	.lock = INIT_LOCAL_LOCK(lock),
38 };
39 
40 bool can_do_mlock(void)
41 {
42 	if (rlimit(RLIMIT_MEMLOCK) != 0)
43 		return true;
44 	if (capable(CAP_IPC_LOCK))
45 		return true;
46 	return false;
47 }
48 EXPORT_SYMBOL(can_do_mlock);
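
/*
 * Seen from userspace (a minimal, hypothetical sketch; error handling
 * trimmed): with RLIMIT_MEMLOCK at zero and without CAP_IPC_LOCK, the
 * mlock() below fails with EPERM before any rlimit accounting is done.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *		static char buf[4096];
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
 *			printf("RLIMIT_MEMLOCK: soft=%llu hard=%llu\n",
 *			       (unsigned long long)rl.rlim_cur,
 *			       (unsigned long long)rl.rlim_max);
 *
 *		if (mlock(buf, sizeof(buf)) != 0)
 *			printf("mlock: %s\n", strerror(errno));
 *		else
 *			munlock(buf, sizeof(buf));
 *		return 0;
 *	}
 */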
49 
50 /*
51  * Mlocked folios are marked with the PG_mlocked flag for efficient testing
52  * in vmscan and, possibly, the fault path; and to support semi-accurate
53  * statistics.
54  *
55  * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
56  * will be ostensibly placed on the LRU "unevictable" list (actually no such
57  * list exists), rather than the [in]active lists. PG_unevictable is set to
58  * indicate the unevictable state.
59  */
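
/*
 * Those statistics are visible from userspace: NR_MLOCK feeds the "Mlocked:"
 * line of /proc/meminfo and the unevictable LRU its "Unevictable:" line,
 * while the UNEVICTABLE_PG* events counted below show up in /proc/vmstat as
 * unevictable_pgs_culled, _rescued, _mlocked, _munlocked and _stranded.
 */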
60 
61 static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
62 {
63 	/* There is nothing more we can do while it's off LRU */
64 	if (!folio_test_clear_lru(folio))
65 		return lruvec;
66 
67 	lruvec = folio_lruvec_relock_irq(folio, lruvec);
68 
69 	if (unlikely(folio_evictable(folio))) {
70 		/*
71 		 * This is a little surprising, but quite possible: PG_mlocked
72 		 * must have been cleared already by another CPU.  Could this
73 		 * folio be unevictable?  I'm not sure, but move it now if so.
74 		 */
75 		if (folio_test_unevictable(folio)) {
76 			lruvec_del_folio(lruvec, folio);
77 			folio_clear_unevictable(folio);
78 			lruvec_add_folio(lruvec, folio);
79 
80 			__count_vm_events(UNEVICTABLE_PGRESCUED,
81 					  folio_nr_pages(folio));
82 		}
83 		goto out;
84 	}
85 
86 	if (folio_test_unevictable(folio)) {
87 		if (folio_test_mlocked(folio))
88 			folio->mlock_count++;
89 		goto out;
90 	}
91 
92 	lruvec_del_folio(lruvec, folio);
93 	folio_clear_active(folio);
94 	folio_set_unevictable(folio);
95 	folio->mlock_count = !!folio_test_mlocked(folio);
96 	lruvec_add_folio(lruvec, folio);
97 	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
98 out:
99 	folio_set_lru(folio);
100 	return lruvec;
101 }
102 
103 static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
104 {
105 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
106 
107 	lruvec = folio_lruvec_relock_irq(folio, lruvec);
108 
109 	/* As above, this is a little surprising, but possible */
110 	if (unlikely(folio_evictable(folio)))
111 		goto out;
112 
113 	folio_set_unevictable(folio);
114 	folio->mlock_count = !!folio_test_mlocked(folio);
115 	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
116 out:
117 	lruvec_add_folio(lruvec, folio);
118 	folio_set_lru(folio);
119 	return lruvec;
120 }
121 
122 static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
123 {
124 	int nr_pages = folio_nr_pages(folio);
125 	bool isolated = false;
126 
127 	if (!folio_test_clear_lru(folio))
128 		goto munlock;
129 
130 	isolated = true;
131 	lruvec = folio_lruvec_relock_irq(folio, lruvec);
132 
133 	if (folio_test_unevictable(folio)) {
134 		/* Then mlock_count is maintained, but might undercount */
135 		if (folio->mlock_count)
136 			folio->mlock_count--;
137 		if (folio->mlock_count)
138 			goto out;
139 	}
140 	/* else assume that was the last mlock: reclaim will fix it if not */
141 
142 munlock:
143 	if (folio_test_clear_mlocked(folio)) {
144 		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
145 		if (isolated || !folio_test_unevictable(folio))
146 			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
147 		else
148 			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
149 	}
150 
151 	/* folio_evictable() has to be checked *after* clearing Mlocked */
152 	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
153 		lruvec_del_folio(lruvec, folio);
154 		folio_clear_unevictable(folio);
155 		lruvec_add_folio(lruvec, folio);
156 		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
157 	}
158 out:
159 	if (isolated)
160 		folio_set_lru(folio);
161 	return lruvec;
162 }
163 
164 /*
165  * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
166  */
167 #define LRU_FOLIO 0x1
168 #define NEW_FOLIO 0x2
169 static inline struct folio *mlock_lru(struct folio *folio)
170 {
171 	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
172 }
173 
174 static inline struct folio *mlock_new(struct folio *folio)
175 {
176 	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
177 }
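
/*
 * This relies on struct folio pointers being word-aligned, so the two low
 * bits are always clear and can carry LRU_FOLIO/NEW_FOLIO until
 * mlock_folio_batch() strips them off again.  A self-contained sketch of the
 * same tagging idea (hypothetical helpers, not kernel API):
 *
 *	#include <stdint.h>
 *
 *	#define TAG_MASK	0x3UL
 *
 *	static inline void *tag_ptr(void *p, unsigned long tag)
 *	{
 *		// p must be at least 4-byte aligned so the tag bits are free
 *		return (void *)((uintptr_t)p | (tag & TAG_MASK));
 *	}
 *
 *	static inline void *untag_ptr(void *tagged, unsigned long *tag)
 *	{
 *		*tag = (uintptr_t)tagged & TAG_MASK;
 *		return (void *)((uintptr_t)tagged & ~TAG_MASK);
 *	}
 */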
178 
179 /*
180  * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
181  * make use of such folio pointer flags in future, but for now just keep it for
182  * mlock.  We could use three separate folio batches instead, but one feels
183  * better (munlocking a full folio batch does not need to drain mlocking folio
184  * batches first).
185  */
186 static void mlock_folio_batch(struct folio_batch *fbatch)
187 {
188 	struct lruvec *lruvec = NULL;
189 	unsigned long mlock;
190 	struct folio *folio;
191 	int i;
192 
193 	for (i = 0; i < folio_batch_count(fbatch); i++) {
194 		folio = fbatch->folios[i];
195 		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196 		folio = (struct folio *)((unsigned long)folio - mlock);
197 		fbatch->folios[i] = folio;
198 
199 		if (mlock & LRU_FOLIO)
200 			lruvec = __mlock_folio(folio, lruvec);
201 		else if (mlock & NEW_FOLIO)
202 			lruvec = __mlock_new_folio(folio, lruvec);
203 		else
204 			lruvec = __munlock_folio(folio, lruvec);
205 	}
206 
207 	if (lruvec)
208 		unlock_page_lruvec_irq(lruvec);
209 	folios_put(fbatch->folios, folio_batch_count(fbatch));
210 	folio_batch_reinit(fbatch);
211 }
212 
213 void mlock_drain_local(void)
214 {
215 	struct folio_batch *fbatch;
216 
217 	local_lock(&mlock_fbatch.lock);
218 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
219 	if (folio_batch_count(fbatch))
220 		mlock_folio_batch(fbatch);
221 	local_unlock(&mlock_fbatch.lock);
222 }
223 
224 void mlock_drain_remote(int cpu)
225 {
226 	struct folio_batch *fbatch;
227 
228 	WARN_ON_ONCE(cpu_online(cpu));
229 	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
230 	if (folio_batch_count(fbatch))
231 		mlock_folio_batch(fbatch);
232 }
233 
234 bool need_mlock_drain(int cpu)
235 {
236 	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
237 }
238 
239 /**
240  * mlock_folio - mlock a folio already on (or temporarily off) LRU
241  * @folio: folio to be mlocked.
242  */
243 void mlock_folio(struct folio *folio)
244 {
245 	struct folio_batch *fbatch;
246 
247 	local_lock(&mlock_fbatch.lock);
248 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
249 
250 	if (!folio_test_set_mlocked(folio)) {
251 		int nr_pages = folio_nr_pages(folio);
252 
253 		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
254 		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
255 	}
256 
257 	folio_get(folio);
258 	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
259 	    folio_test_large(folio) || lru_cache_disabled())
260 		mlock_folio_batch(fbatch);
261 	local_unlock(&mlock_fbatch.lock);
262 }
263 
264 /**
265  * mlock_new_folio - mlock a newly allocated folio not yet on LRU
266  * @folio: folio to be mlocked, either normal or a THP head.
267  */
268 void mlock_new_folio(struct folio *folio)
269 {
270 	struct folio_batch *fbatch;
271 	int nr_pages = folio_nr_pages(folio);
272 
273 	local_lock(&mlock_fbatch.lock);
274 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
275 	folio_set_mlocked(folio);
276 
277 	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
278 	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
279 
280 	folio_get(folio);
281 	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
282 	    folio_test_large(folio) || lru_cache_disabled())
283 		mlock_folio_batch(fbatch);
284 	local_unlock(&mlock_fbatch.lock);
285 }
286 
287 /**
288  * munlock_folio - munlock a folio
289  * @folio: folio to be munlocked, either normal or a THP head.
290  */
291 void munlock_folio(struct folio *folio)
292 {
293 	struct folio_batch *fbatch;
294 
295 	local_lock(&mlock_fbatch.lock);
296 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
297 	/*
298 	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
299 	 * which will check whether the folio is multiply mlocked.
300 	 */
301 	folio_get(folio);
302 	if (!folio_batch_add(fbatch, folio) ||
303 	    folio_test_large(folio) || lru_cache_disabled())
304 		mlock_folio_batch(fbatch);
305 	local_unlock(&mlock_fbatch.lock);
306 }
307 
308 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
309 			   unsigned long end, struct mm_walk *walk)
310 
311 {
312 	struct vm_area_struct *vma = walk->vma;
313 	spinlock_t *ptl;
314 	pte_t *start_pte, *pte;
315 	struct folio *folio;
316 
317 	ptl = pmd_trans_huge_lock(pmd, vma);
318 	if (ptl) {
319 		if (!pmd_present(*pmd))
320 			goto out;
321 		if (is_huge_zero_pmd(*pmd))
322 			goto out;
323 		folio = page_folio(pmd_page(*pmd));
324 		if (vma->vm_flags & VM_LOCKED)
325 			mlock_folio(folio);
326 		else
327 			munlock_folio(folio);
328 		goto out;
329 	}
330 
331 	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
332 	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
333 		if (!pte_present(*pte))
334 			continue;
335 		folio = vm_normal_folio(vma, addr, *pte);
336 		if (!folio || folio_is_zone_device(folio))
337 			continue;
338 		if (folio_test_large(folio))
339 			continue;
340 		if (vma->vm_flags & VM_LOCKED)
341 			mlock_folio(folio);
342 		else
343 			munlock_folio(folio);
344 	}
345 	pte_unmap(start_pte);
346 out:
347 	spin_unlock(ptl);
348 	cond_resched();
349 	return 0;
350 }
351 
352 /*
353  * mlock_vma_pages_range() - mlock any pages already in the range,
354  *                           or munlock all pages in the range.
355  * @vma - vma containing range to be mlock()ed or munlock()ed
356  * @start - start address in @vma of the range
357  * @end - end of range in @vma
358  * @newflags - the new set of flags for @vma.
359  *
360  * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
361  * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
362  */
363 static void mlock_vma_pages_range(struct vm_area_struct *vma,
364 	unsigned long start, unsigned long end, vm_flags_t newflags)
365 {
366 	static const struct mm_walk_ops mlock_walk_ops = {
367 		.pmd_entry = mlock_pte_range,
368 	};
369 
370 	/*
371 	 * There is a slight chance that concurrent page migration,
372 	 * or page reclaim finding a page of this now-VM_LOCKED vma,
373 	 * will call mlock_vma_folio() and raise page's mlock_count:
374 	 * double counting, leaving the page unevictable indefinitely.
375 	 * Communicate this danger to mlock_vma_folio() with VM_IO,
376 	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
377 	 * mmap_lock is held in write mode here, so this weird
378 	 * combination should not be visible to other mmap_lock users;
379 	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
380 	 */
381 	if (newflags & VM_LOCKED)
382 		newflags |= VM_IO;
383 	vm_flags_reset_once(vma, newflags);
384 
385 	lru_add_drain();
386 	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
387 	lru_add_drain();
388 
389 	if (newflags & VM_IO) {
390 		newflags &= ~VM_IO;
391 		vm_flags_reset_once(vma, newflags);
392 	}
393 }
394 
395 /*
396  * mlock_fixup - handle mlock[all]/munlock[all] requests.
397  *
398  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
399  * munlock is a no-op.  However, for some special vmas, we go ahead and
400  * populate the ptes.
401  *
402  * For vmas that pass the filters, merge/split as appropriate.
403  */
404 static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
405 	       struct vm_area_struct **prev, unsigned long start,
406 	       unsigned long end, vm_flags_t newflags)
407 {
408 	struct mm_struct *mm = vma->vm_mm;
409 	pgoff_t pgoff;
410 	int nr_pages;
411 	int ret = 0;
412 	vm_flags_t oldflags = vma->vm_flags;
413 
414 	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
415 	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
416 	    vma_is_dax(vma) || vma_is_secretmem(vma))
417 		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
418 		goto out;
419 
420 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
421 	*prev = vma_merge(vmi, mm, *prev, start, end, newflags,
422 			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
423 			vma->vm_userfaultfd_ctx, anon_vma_name(vma));
424 	if (*prev) {
425 		vma = *prev;
426 		goto success;
427 	}
428 
429 	if (start != vma->vm_start) {
430 		ret = split_vma(vmi, vma, start, 1);
431 		if (ret)
432 			goto out;
433 	}
434 
435 	if (end != vma->vm_end) {
436 		ret = split_vma(vmi, vma, end, 0);
437 		if (ret)
438 			goto out;
439 	}
440 
441 success:
442 	/*
443 	 * Keep track of amount of locked VM.
444 	 */
445 	nr_pages = (end - start) >> PAGE_SHIFT;
446 	if (!(newflags & VM_LOCKED))
447 		nr_pages = -nr_pages;
448 	else if (oldflags & VM_LOCKED)
449 		nr_pages = 0;
450 	mm->locked_vm += nr_pages;
451 
452 	/*
453 	 * vm_flags is protected by the mmap_lock held in write mode.
454 	 * It's okay if try_to_unmap_one() unmaps a page just after we
455 	 * set VM_LOCKED; populate_vma_page_range() will bring it back.
456 	 */
457 
458 	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
459 		/* No work to do, and mlocking twice would be wrong */
460 		vm_flags_reset(vma, newflags);
461 	} else {
462 		mlock_vma_pages_range(vma, start, end, newflags);
463 	}
464 out:
465 	*prev = vma;
466 	return ret;
467 }
468 
469 static int apply_vma_lock_flags(unsigned long start, size_t len,
470 				vm_flags_t flags)
471 {
472 	unsigned long nstart, end, tmp;
473 	struct vm_area_struct *vma, *prev;
474 	int error;
475 	VMA_ITERATOR(vmi, current->mm, start);
476 
477 	VM_BUG_ON(offset_in_page(start));
478 	VM_BUG_ON(len != PAGE_ALIGN(len));
479 	end = start + len;
480 	if (end < start)
481 		return -EINVAL;
482 	if (end == start)
483 		return 0;
484 	vma = vma_iter_load(&vmi);
485 	if (!vma)
486 		return -ENOMEM;
487 
488 	prev = vma_prev(&vmi);
489 	if (start > vma->vm_start)
490 		prev = vma;
491 
492 	nstart = start;
493 	tmp = vma->vm_start;
494 	for_each_vma_range(vmi, vma, end) {
495 		vm_flags_t newflags;
496 
497 		if (vma->vm_start != tmp)
498 			return -ENOMEM;
499 
500 		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
501 		newflags |= flags;
502 		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
503 		tmp = vma->vm_end;
504 		if (tmp > end)
505 			tmp = end;
506 		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
507 		if (error)
508 			break;
509 		nstart = tmp;
510 	}
511 
512 	if (vma_iter_end(&vmi) < end)
513 		return -ENOMEM;
514 
515 	return error;
516 }
517 
518 /*
519  * Go through the vmas in the given range and sum the size of the
520  * mlocked pages within it.
521  * Memory locked deferred with mlock2(..., MLOCK_ONFAULT) is
522  * also counted.
523  * Return value: the number of previously mlocked pages.
524  */
525 static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
526 		unsigned long start, size_t len)
527 {
528 	struct vm_area_struct *vma;
529 	unsigned long count = 0;
530 	unsigned long end;
531 	VMA_ITERATOR(vmi, mm, start);
532 
533 	/* Don't overflow past ULONG_MAX */
534 	if (unlikely(ULONG_MAX - len < start))
535 		end = ULONG_MAX;
536 	else
537 		end = start + len;
538 
539 	for_each_vma_range(vmi, vma, end) {
540 		if (vma->vm_flags & VM_LOCKED) {
541 			if (start > vma->vm_start)
542 				count -= (start - vma->vm_start);
543 			if (end < vma->vm_end) {
544 				count += end - vma->vm_start;
545 				break;
546 			}
547 			count += vma->vm_end - vma->vm_start;
548 		}
549 	}
550 
551 	return count >> PAGE_SHIFT;
552 }
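
/*
 * Worked example (4KiB pages): for a VM_LOCKED vma spanning [0x1000, 0x9000)
 * and a request with start = 0x3000, end = 0x7000, the loop subtracts
 * start - vm_start = 0x2000, then adds end - vm_start = 0x6000 and breaks,
 * leaving count = 0x4000 -- the 4 pages of overlap [0x3000, 0x7000).
 */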
553 
554 /*
555  * Convert a get_user_pages() return value to the corresponding POSIX mlock() error.
556  */
557 static int __mlock_posix_error_return(long retval)
558 {
559 	if (retval == -EFAULT)
560 		retval = -ENOMEM;
561 	else if (retval == -ENOMEM)
562 		retval = -EAGAIN;
563 	return retval;
564 }
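
/*
 * POSIX specifies ENOMEM for an mlock() range that is not fully mapped and
 * EAGAIN when some or all of the pages could not be locked, hence the
 * translation of get_user_pages()'s -EFAULT and -ENOMEM above.
 */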
565 
566 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
567 {
568 	unsigned long locked;
569 	unsigned long lock_limit;
570 	int error = -ENOMEM;
571 
572 	start = untagged_addr(start);
573 
574 	if (!can_do_mlock())
575 		return -EPERM;
576 
577 	len = PAGE_ALIGN(len + (offset_in_page(start)));
578 	start &= PAGE_MASK;
579 
580 	lock_limit = rlimit(RLIMIT_MEMLOCK);
581 	lock_limit >>= PAGE_SHIFT;
582 	locked = len >> PAGE_SHIFT;
583 
584 	if (mmap_write_lock_killable(current->mm))
585 		return -EINTR;
586 
587 	locked += current->mm->locked_vm;
588 	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
589 		/*
590 		 * It is possible that the regions requested intersect with
591 		 * previously mlocked areas; the part already accounted in
592 		 * "mm->locked_vm" should not be counted again toward the new
593 		 * mlock increment.  So check and adjust the locked count if necessary.
594 		 */
595 		locked -= count_mm_mlocked_page_nr(current->mm,
596 				start, len);
597 	}
598 
599 	/* check against resource limits */
600 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
601 		error = apply_vma_lock_flags(start, len, flags);
602 
603 	mmap_write_unlock(current->mm);
604 	if (error)
605 		return error;
606 
607 	error = __mm_populate(start, len, 0);
608 	if (error)
609 		return __mlock_posix_error_return(error);
610 	return 0;
611 }
612 
613 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
614 {
615 	return do_mlock(start, len, VM_LOCKED);
616 }
617 
618 SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
619 {
620 	vm_flags_t vm_flags = VM_LOCKED;
621 
622 	if (flags & ~MLOCK_ONFAULT)
623 		return -EINVAL;
624 
625 	if (flags & MLOCK_ONFAULT)
626 		vm_flags |= VM_LOCKONFAULT;
627 
628 	return do_mlock(start, len, vm_flags);
629 }
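
/*
 * A minimal userspace sketch of the difference (assumes glibc 2.27+ for the
 * mlock2() wrapper and MLOCK_ONFAULT; older libcs need syscall(__NR_mlock2)):
 * mlock() faults in and locks the whole range now, while
 * mlock2(..., MLOCK_ONFAULT) sets VM_LOCKONFAULT so pages are only locked as
 * they are touched.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1 << 20;
 *		char *now = malloc(len), *lazy = malloc(len);
 *
 *		if (!now || !lazy)
 *			return 1;
 *		if (mlock(now, len) != 0)			// lock and populate now
 *			perror("mlock");
 *		if (mlock2(lazy, len, MLOCK_ONFAULT) != 0)	// lock on first touch
 *			perror("mlock2");
 *
 *		munlock(now, len);
 *		munlock(lazy, len);
 *		return 0;
 *	}
 */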
630 
631 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
632 {
633 	int ret;
634 
635 	start = untagged_addr(start);
636 
637 	len = PAGE_ALIGN(len + (offset_in_page(start)));
638 	start &= PAGE_MASK;
639 
640 	if (mmap_write_lock_killable(current->mm))
641 		return -EINTR;
642 	ret = apply_vma_lock_flags(start, len, 0);
643 	mmap_write_unlock(current->mm);
644 
645 	return ret;
646 }
647 
648 /*
649  * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
650  * and translate into the appropriate modifications to mm->def_flags and/or the
651  * flags for all current VMAs.
652  *
653  * There are a couple of subtleties with this.  If mlockall() is called multiple
654  * times with different flags, the values do not necessarily stack.  If mlockall
655  * is called once including the MCL_FUTURE flag and then a second time without
656  * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
657  */
658 static int apply_mlockall_flags(int flags)
659 {
660 	VMA_ITERATOR(vmi, current->mm, 0);
661 	struct vm_area_struct *vma, *prev = NULL;
662 	vm_flags_t to_add = 0;
663 
664 	current->mm->def_flags &= ~VM_LOCKED_MASK;
665 	if (flags & MCL_FUTURE) {
666 		current->mm->def_flags |= VM_LOCKED;
667 
668 		if (flags & MCL_ONFAULT)
669 			current->mm->def_flags |= VM_LOCKONFAULT;
670 
671 		if (!(flags & MCL_CURRENT))
672 			goto out;
673 	}
674 
675 	if (flags & MCL_CURRENT) {
676 		to_add |= VM_LOCKED;
677 		if (flags & MCL_ONFAULT)
678 			to_add |= VM_LOCKONFAULT;
679 	}
680 
681 	for_each_vma(vmi, vma) {
682 		vm_flags_t newflags;
683 
684 		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
685 		newflags |= to_add;
686 
687 		/* Ignore errors */
688 		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
689 			    newflags);
690 		cond_resched();
691 	}
692 out:
693 	return 0;
694 }
695 
696 SYSCALL_DEFINE1(mlockall, int, flags)
697 {
698 	unsigned long lock_limit;
699 	int ret;
700 
701 	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
702 	    flags == MCL_ONFAULT)
703 		return -EINVAL;
704 
705 	if (!can_do_mlock())
706 		return -EPERM;
707 
708 	lock_limit = rlimit(RLIMIT_MEMLOCK);
709 	lock_limit >>= PAGE_SHIFT;
710 
711 	if (mmap_write_lock_killable(current->mm))
712 		return -EINTR;
713 
714 	ret = -ENOMEM;
715 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
716 	    capable(CAP_IPC_LOCK))
717 		ret = apply_mlockall_flags(flags);
718 	mmap_write_unlock(current->mm);
719 	if (!ret && (flags & MCL_CURRENT))
720 		mm_populate(0, TASK_SIZE);
721 
722 	return ret;
723 }
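
/*
 * Userspace sketch: lock everything mapped now and in the future, on fault.
 * As checked above, MCL_ONFAULT on its own is -EINVAL; it only qualifies
 * MCL_CURRENT and/or MCL_FUTURE.  (MCL_ONFAULT assumes Linux 4.4+ and a libc
 * that defines it.)
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) != 0)
 *			perror("mlockall");
 *
 *		// ... latency-sensitive work: no major faults to swap ...
 *
 *		munlockall();
 *		return 0;
 *	}
 */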
724 
725 SYSCALL_DEFINE0(munlockall)
726 {
727 	int ret;
728 
729 	if (mmap_write_lock_killable(current->mm))
730 		return -EINTR;
731 	ret = apply_mlockall_flags(0);
732 	mmap_write_unlock(current->mm);
733 	return ret;
734 }
735 
736 /*
737  * Objects with a lifetime different from that of processes (SHM_LOCK and
738  * SHM_HUGETLB shm segments) get accounted against the user's ucounts instead.
739  */
740 static DEFINE_SPINLOCK(shmlock_user_lock);
741 
742 int user_shm_lock(size_t size, struct ucounts *ucounts)
743 {
744 	unsigned long lock_limit, locked;
745 	long memlock;
746 	int allowed = 0;
747 
748 	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
749 	lock_limit = rlimit(RLIMIT_MEMLOCK);
750 	if (lock_limit != RLIM_INFINITY)
751 		lock_limit >>= PAGE_SHIFT;
752 	spin_lock(&shmlock_user_lock);
753 	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
754 
755 	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
756 		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
757 		goto out;
758 	}
759 	if (!get_ucounts(ucounts)) {
760 		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
761 		allowed = 0;
762 		goto out;
763 	}
764 	allowed = 1;
765 out:
766 	spin_unlock(&shmlock_user_lock);
767 	return allowed;
768 }
769 
770 void user_shm_unlock(size_t size, struct ucounts *ucounts)
771 {
772 	spin_lock(&shmlock_user_lock);
773 	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
774 	spin_unlock(&shmlock_user_lock);
775 	put_ucounts(ucounts);
776 }
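
/*
 * The SysV shm path that reaches user_shm_lock()/user_shm_unlock(), as a
 * hypothetical userspace sketch (SHM_LOCK needs CAP_IPC_LOCK or a
 * sufficient RLIMIT_MEMLOCK for the caller):
 *
 *	#include <stdio.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 8 << 20, IPC_CREAT | 0600);
 *
 *		if (id < 0) {
 *			perror("shmget");
 *			return 1;
 *		}
 *		if (shmctl(id, SHM_LOCK, NULL) != 0)	// charges RLIMIT_MEMLOCK
 *			perror("shmctl(SHM_LOCK)");
 *		if (shmctl(id, SHM_UNLOCK, NULL) != 0)	// undoes the charge
 *			perror("shmctl(SHM_UNLOCK)");
 *
 *		shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 */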
777