// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
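
/*
 * Illustrative userspace sketch (not part of the original file): can_do_mlock()
 * above is the kernel-side gate for the mlock-family syscalls; locking is
 * permitted when RLIMIT_MEMLOCK is non-zero or the caller has CAP_IPC_LOCK.
 * A minimal sketch of the same policy as seen from userspace -- an
 * unprivileged process with RLIMIT_MEMLOCK set to 0 gets EPERM from mlock():
 *
 *	#include <sys/resource.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *		static char buf[4096];
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
 *			printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
 *			       (unsigned long long)rl.rlim_cur);
 *
 *		if (mlock(buf, sizeof(buf)))
 *			perror("mlock");
 *		else
 *			munlock(buf, sizeof(buf));
 *		return 0;
 *	}
 */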

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race.  The page has already been moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Isolate a page from LRU with optional get_page() pin.
 * Assumes lru_lock already held and page already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked.  This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock.  There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU.  putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary.  vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 * HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	pg_data_t *pgdat = page_pgdat(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize with any parallel __split_huge_page_refcount() which
	 * might otherwise copy PageMlocked to part of the tail pages before
	 * we clear it in the head page.  It also stabilizes thp_nr_pages().
	 */
	spin_lock_irq(&pgdat->lru_lock);

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = thp_nr_pages(page);
	__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(&pgdat->lru_lock);
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(&pgdat->lru_lock);

out:
	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving an evictable page on the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable.  @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec.  Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases.  First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->zone_pgdat->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin.  We cannot do it under lru_lock however.  If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->zone_pgdat->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path.  We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds.  These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned.  This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, struct zone *zone,
		unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;

		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's
		 * node+zone does not match.
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec.  The pin from follow_page_mask()
				 * prevents them from being collapsed into a
				 * THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk.  This will also update start to
				 * the next page to process.  Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}
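
/*
 * Worked example (illustrative, not part of the original file; assumes 4KiB
 * base pages and 2MiB THPs, i.e. HPAGE_PMD_NR == 512, as on x86_64):
 *
 *	page_mask = munlock_vma_page(page);	returns 511 for an mlocked
 *						THP head page
 *	page_increm = 1 + page_mask;		512 pages
 *	start += page_increm * PAGE_SIZE;	start advances by 2MiB
 *
 * For a normal page munlock_vma_page() returns 0, so the loop above advances
 * one base page at a time; batched normal pages take the
 * __munlock_pagevec_fill() path instead, which updates start itself.
 */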

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through vma areas and sum size of mlocked
 * vma pages, as return value.
 * Note that the deferred memory locking case (mlock2() with MLOCK_ONFAULT)
 * is also counted.
 * Return value: previously mlocked page counts
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
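
/*
 * Worked example (illustrative, not part of the original file): with a single
 * VM_LOCKED vma spanning [0x10000, 0x20000) and a request of start = 0x14000,
 * len = 0x8000, the loop above first subtracts the part of the vma below
 * @start (count -= 0x4000), then adds everything from vma->vm_start up to
 * start + len (count += 0xc000) and breaks, leaving count = 0x8000.  With
 * 4KiB pages the function returns 8 -- exactly the already-mlocked pages
 * inside the requested range, which do_mlock() below then excludes from the
 * RLIMIT_MEMLOCK check.
 */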

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas; that part is already accounted
		 * for in "mm->locked_vm" and should not be counted again
		 * towards the new mlock increment.  So check and adjust the
		 * locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}
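
/*
 * Illustrative userspace sketch (not part of the original file) of how the
 * syscalls above are typically used, and how the error translation in
 * __mlock_posix_error_return() is visible to callers: EPERM when
 * can_do_mlock() fails, ENOMEM when RLIMIT_MEMLOCK would be exceeded or the
 * range is not fully mapped, EAGAIN when the pages could not be faulted in
 * and locked.  With MLOCK_ONFAULT the pages are locked as they are touched
 * rather than populated up front.  Assumes glibc 2.27+ for the mlock2()
 * wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	static char buf[1 << 20];
 *
 *	int main(void)
 *	{
 *		if (mlock2(buf, sizeof(buf), MLOCK_ONFAULT)) {
 *			perror("mlock2");
 *			return 1;
 *		}
 *		buf[0] = 1;
 *		munlock(buf, sizeof(buf));
 *		return 0;
 *	}
 */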

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetimes than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
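
/*
 * Illustrative userspace sketch (not part of the original file):
 * user_shm_lock()/user_shm_unlock() above implement the per-user
 * RLIMIT_MEMLOCK accounting behind shmctl(SHM_LOCK), which prevents a SysV
 * shared memory segment from being swapped rather than locking a process
 * address range:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *
 *		if (id < 0 || shmctl(id, SHM_LOCK, NULL))
 *			perror("SHM_LOCK");
 *		if (id >= 0)
 *			shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 *
 * Unlike mlock(), the locked size is charged to user->locked_shm and checked
 * against RLIMIT_MEMLOCK in user_shm_lock(); it is not added to
 * mm->locked_vm.
 */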