1 /* 2 * Resizable virtual memory filesystem for Linux. 3 * 4 * Copyright (C) 2000 Linus Torvalds. 5 * 2000 Transmeta Corp. 6 * 2000-2001 Christoph Rohland 7 * 2000-2001 SAP AG 8 * 2002 Red Hat Inc. 9 * Copyright (C) 2002-2011 Hugh Dickins. 10 * Copyright (C) 2011 Google Inc. 11 * Copyright (C) 2002-2005 VERITAS Software Corporation. 12 * Copyright (C) 2004 Andi Kleen, SuSE Labs 13 * 14 * Extended attribute support for tmpfs: 15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net> 16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 17 * 18 * tiny-shmem: 19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com> 20 * 21 * This file is released under the GPL. 22 */ 23 24 #include <linux/fs.h> 25 #include <linux/init.h> 26 #include <linux/vfs.h> 27 #include <linux/mount.h> 28 #include <linux/ramfs.h> 29 #include <linux/pagemap.h> 30 #include <linux/file.h> 31 #include <linux/mm.h> 32 #include <linux/random.h> 33 #include <linux/sched/signal.h> 34 #include <linux/export.h> 35 #include <linux/swap.h> 36 #include <linux/uio.h> 37 #include <linux/khugepaged.h> 38 #include <linux/hugetlb.h> 39 #include <linux/frontswap.h> 40 #include <linux/fs_parser.h> 41 42 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */ 43 44 static struct vfsmount *shm_mnt; 45 46 #ifdef CONFIG_SHMEM 47 /* 48 * This virtual memory filesystem is heavily based on the ramfs. It 49 * extends ramfs by the ability to use swap and honor resource limits 50 * which makes it a completely usable filesystem. 51 */ 52 53 #include <linux/xattr.h> 54 #include <linux/exportfs.h> 55 #include <linux/posix_acl.h> 56 #include <linux/posix_acl_xattr.h> 57 #include <linux/mman.h> 58 #include <linux/string.h> 59 #include <linux/slab.h> 60 #include <linux/backing-dev.h> 61 #include <linux/shmem_fs.h> 62 #include <linux/writeback.h> 63 #include <linux/blkdev.h> 64 #include <linux/pagevec.h> 65 #include <linux/percpu_counter.h> 66 #include <linux/falloc.h> 67 #include <linux/splice.h> 68 #include <linux/security.h> 69 #include <linux/swapops.h> 70 #include <linux/mempolicy.h> 71 #include <linux/namei.h> 72 #include <linux/ctype.h> 73 #include <linux/migrate.h> 74 #include <linux/highmem.h> 75 #include <linux/seq_file.h> 76 #include <linux/magic.h> 77 #include <linux/syscalls.h> 78 #include <linux/fcntl.h> 79 #include <uapi/linux/memfd.h> 80 #include <linux/userfaultfd_k.h> 81 #include <linux/rmap.h> 82 #include <linux/uuid.h> 83 84 #include <linux/uaccess.h> 85 86 #include "internal.h" 87 88 #define BLOCKS_PER_PAGE (PAGE_SIZE/512) 89 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) 90 91 /* Pretend that each entry is of this size in directory's i_size */ 92 #define BOGO_DIRENT_SIZE 20 93 94 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ 95 #define SHORT_SYMLINK_LEN 128 96 97 /* 98 * shmem_fallocate communicates with shmem_fault or shmem_writepage via 99 * inode->i_private (with i_mutex making sure that it has only one user at 100 * a time): we would prefer not to enlarge the shmem inode just for that. 
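 *
 * A rough sketch of that handshake (see the struct below and the users of
 * inode->i_private elsewhere in this file): while instantiating pages,
 * shmem_fallocate() points i_private at a shmem_falloc on its own stack so
 * that shmem_writepage() can bump nr_unswapped and let the fallocation back
 * off under memory pressure; while punching a hole it sets ->waitq instead,
 * and shmem_fault() on the affected range sleeps there until the punch ends.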
101 */ 102 struct shmem_falloc { 103 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ 104 pgoff_t start; /* start of range currently being fallocated */ 105 pgoff_t next; /* the next page offset to be fallocated */ 106 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 107 pgoff_t nr_unswapped; /* how often writepage refused to swap out */ 108 }; 109 110 struct shmem_options { 111 unsigned long long blocks; 112 unsigned long long inodes; 113 struct mempolicy *mpol; 114 kuid_t uid; 115 kgid_t gid; 116 umode_t mode; 117 bool full_inums; 118 int huge; 119 int seen; 120 #define SHMEM_SEEN_BLOCKS 1 121 #define SHMEM_SEEN_INODES 2 122 #define SHMEM_SEEN_HUGE 4 123 #define SHMEM_SEEN_INUMS 8 124 }; 125 126 #ifdef CONFIG_TMPFS 127 static unsigned long shmem_default_max_blocks(void) 128 { 129 return totalram_pages() / 2; 130 } 131 132 static unsigned long shmem_default_max_inodes(void) 133 { 134 unsigned long nr_pages = totalram_pages(); 135 136 return min(nr_pages - totalhigh_pages(), nr_pages / 2); 137 } 138 #endif 139 140 static bool shmem_should_replace_page(struct page *page, gfp_t gfp); 141 static int shmem_replace_page(struct page **pagep, gfp_t gfp, 142 struct shmem_inode_info *info, pgoff_t index); 143 static int shmem_swapin_page(struct inode *inode, pgoff_t index, 144 struct page **pagep, enum sgp_type sgp, 145 gfp_t gfp, struct vm_area_struct *vma, 146 vm_fault_t *fault_type); 147 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 148 struct page **pagep, enum sgp_type sgp, 149 gfp_t gfp, struct vm_area_struct *vma, 150 struct vm_fault *vmf, vm_fault_t *fault_type); 151 152 int shmem_getpage(struct inode *inode, pgoff_t index, 153 struct page **pagep, enum sgp_type sgp) 154 { 155 return shmem_getpage_gfp(inode, index, pagep, sgp, 156 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); 157 } 158 159 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) 160 { 161 return sb->s_fs_info; 162 } 163 164 /* 165 * shmem_file_setup pre-accounts the whole fixed size of a VM object, 166 * for shared memory and for shared anonymous (/dev/zero) mappings 167 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), 168 * consistent with the pre-accounting of private mappings ... 169 */ 170 static inline int shmem_acct_size(unsigned long flags, loff_t size) 171 { 172 return (flags & VM_NORESERVE) ? 173 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); 174 } 175 176 static inline void shmem_unacct_size(unsigned long flags, loff_t size) 177 { 178 if (!(flags & VM_NORESERVE)) 179 vm_unacct_memory(VM_ACCT(size)); 180 } 181 182 static inline int shmem_reacct_size(unsigned long flags, 183 loff_t oldsize, loff_t newsize) 184 { 185 if (!(flags & VM_NORESERVE)) { 186 if (VM_ACCT(newsize) > VM_ACCT(oldsize)) 187 return security_vm_enough_memory_mm(current->mm, 188 VM_ACCT(newsize) - VM_ACCT(oldsize)); 189 else if (VM_ACCT(newsize) < VM_ACCT(oldsize)) 190 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); 191 } 192 return 0; 193 } 194 195 /* 196 * ... whereas tmpfs objects are accounted incrementally as 197 * pages are allocated, in order to allow large sparse files. 198 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, 199 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 
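 *
 * An illustrative calculation (with 4kB pages, nothing new enforced here):
 * VM_ACCT(5000) == 2, so a shared object created with size 5000 bytes is
 * charged two pages up front by shmem_acct_size(), whereas a sparse tmpfs
 * file of the same size is charged one page at a time, only when
 * shmem_acct_block() succeeds for a page actually being instantiated.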
200 */ 201 static inline int shmem_acct_block(unsigned long flags, long pages) 202 { 203 if (!(flags & VM_NORESERVE)) 204 return 0; 205 206 return security_vm_enough_memory_mm(current->mm, 207 pages * VM_ACCT(PAGE_SIZE)); 208 } 209 210 static inline void shmem_unacct_blocks(unsigned long flags, long pages) 211 { 212 if (flags & VM_NORESERVE) 213 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); 214 } 215 216 static inline bool shmem_inode_acct_block(struct inode *inode, long pages) 217 { 218 struct shmem_inode_info *info = SHMEM_I(inode); 219 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 220 221 if (shmem_acct_block(info->flags, pages)) 222 return false; 223 224 if (sbinfo->max_blocks) { 225 if (percpu_counter_compare(&sbinfo->used_blocks, 226 sbinfo->max_blocks - pages) > 0) 227 goto unacct; 228 percpu_counter_add(&sbinfo->used_blocks, pages); 229 } 230 231 return true; 232 233 unacct: 234 shmem_unacct_blocks(info->flags, pages); 235 return false; 236 } 237 238 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages) 239 { 240 struct shmem_inode_info *info = SHMEM_I(inode); 241 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 242 243 if (sbinfo->max_blocks) 244 percpu_counter_sub(&sbinfo->used_blocks, pages); 245 shmem_unacct_blocks(info->flags, pages); 246 } 247 248 static const struct super_operations shmem_ops; 249 const struct address_space_operations shmem_aops; 250 static const struct file_operations shmem_file_operations; 251 static const struct inode_operations shmem_inode_operations; 252 static const struct inode_operations shmem_dir_inode_operations; 253 static const struct inode_operations shmem_special_inode_operations; 254 static const struct vm_operations_struct shmem_vm_ops; 255 static struct file_system_type shmem_fs_type; 256 257 bool vma_is_shmem(struct vm_area_struct *vma) 258 { 259 return vma->vm_ops == &shmem_vm_ops; 260 } 261 262 static LIST_HEAD(shmem_swaplist); 263 static DEFINE_MUTEX(shmem_swaplist_mutex); 264 265 /* 266 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and 267 * produces a novel ino for the newly allocated inode. 268 * 269 * It may also be called when making a hard link to permit the space needed by 270 * each dentry. However, in that case, no new inode number is needed since that 271 * internally draws from another pool of inode numbers (currently global 272 * get_next_ino()). This case is indicated by passing NULL as inop. 
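 *
 * For example, shmem_get_inode() passes &ino to obtain a fresh inode number,
 * while shmem_link() passes NULL: the new dentry for a hard link still
 * consumes one reserved inode's worth of space, but the linked inode keeps
 * the number it already has.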
273 */ 274 #define SHMEM_INO_BATCH 1024 275 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) 276 { 277 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 278 ino_t ino; 279 280 if (!(sb->s_flags & SB_KERNMOUNT)) { 281 spin_lock(&sbinfo->stat_lock); 282 if (sbinfo->max_inodes) { 283 if (!sbinfo->free_inodes) { 284 spin_unlock(&sbinfo->stat_lock); 285 return -ENOSPC; 286 } 287 sbinfo->free_inodes--; 288 } 289 if (inop) { 290 ino = sbinfo->next_ino++; 291 if (unlikely(is_zero_ino(ino))) 292 ino = sbinfo->next_ino++; 293 if (unlikely(!sbinfo->full_inums && 294 ino > UINT_MAX)) { 295 /* 296 * Emulate get_next_ino uint wraparound for 297 * compatibility 298 */ 299 if (IS_ENABLED(CONFIG_64BIT)) 300 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n", 301 __func__, MINOR(sb->s_dev)); 302 sbinfo->next_ino = 1; 303 ino = sbinfo->next_ino++; 304 } 305 *inop = ino; 306 } 307 spin_unlock(&sbinfo->stat_lock); 308 } else if (inop) { 309 /* 310 * __shmem_file_setup, one of our callers, is lock-free: it 311 * doesn't hold stat_lock in shmem_reserve_inode since 312 * max_inodes is always 0, and is called from potentially 313 * unknown contexts. As such, use a per-cpu batched allocator 314 * which doesn't require the per-sb stat_lock unless we are at 315 * the batch boundary. 316 * 317 * We don't need to worry about inode{32,64} since SB_KERNMOUNT 318 * shmem mounts are not exposed to userspace, so we don't need 319 * to worry about things like glibc compatibility. 320 */ 321 ino_t *next_ino; 322 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); 323 ino = *next_ino; 324 if (unlikely(ino % SHMEM_INO_BATCH == 0)) { 325 spin_lock(&sbinfo->stat_lock); 326 ino = sbinfo->next_ino; 327 sbinfo->next_ino += SHMEM_INO_BATCH; 328 spin_unlock(&sbinfo->stat_lock); 329 if (unlikely(is_zero_ino(ino))) 330 ino++; 331 } 332 *inop = ino; 333 *next_ino = ++ino; 334 put_cpu(); 335 } 336 337 return 0; 338 } 339 340 static void shmem_free_inode(struct super_block *sb) 341 { 342 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 343 if (sbinfo->max_inodes) { 344 spin_lock(&sbinfo->stat_lock); 345 sbinfo->free_inodes++; 346 spin_unlock(&sbinfo->stat_lock); 347 } 348 } 349 350 /** 351 * shmem_recalc_inode - recalculate the block usage of an inode 352 * @inode: inode to recalc 353 * 354 * We have to calculate the free blocks since the mm can drop 355 * undirtied hole pages behind our back. 356 * 357 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped 358 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) 359 * 360 * It has to be called with the spinlock held. 
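 *
 * A small worked example: if info->alloced is 8 while 2 pages are swapped
 * out and only 4 remain in the page cache, then freed = 8 - 2 - 4 = 2, and
 * alloced, i_blocks and the block accounting are all trimmed by those 2 pages.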
361 */ 362 static void shmem_recalc_inode(struct inode *inode) 363 { 364 struct shmem_inode_info *info = SHMEM_I(inode); 365 long freed; 366 367 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; 368 if (freed > 0) { 369 info->alloced -= freed; 370 inode->i_blocks -= freed * BLOCKS_PER_PAGE; 371 shmem_inode_unacct_blocks(inode, freed); 372 } 373 } 374 375 bool shmem_charge(struct inode *inode, long pages) 376 { 377 struct shmem_inode_info *info = SHMEM_I(inode); 378 unsigned long flags; 379 380 if (!shmem_inode_acct_block(inode, pages)) 381 return false; 382 383 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ 384 inode->i_mapping->nrpages += pages; 385 386 spin_lock_irqsave(&info->lock, flags); 387 info->alloced += pages; 388 inode->i_blocks += pages * BLOCKS_PER_PAGE; 389 shmem_recalc_inode(inode); 390 spin_unlock_irqrestore(&info->lock, flags); 391 392 return true; 393 } 394 395 void shmem_uncharge(struct inode *inode, long pages) 396 { 397 struct shmem_inode_info *info = SHMEM_I(inode); 398 unsigned long flags; 399 400 /* nrpages adjustment done by __delete_from_page_cache() or caller */ 401 402 spin_lock_irqsave(&info->lock, flags); 403 info->alloced -= pages; 404 inode->i_blocks -= pages * BLOCKS_PER_PAGE; 405 shmem_recalc_inode(inode); 406 spin_unlock_irqrestore(&info->lock, flags); 407 408 shmem_inode_unacct_blocks(inode, pages); 409 } 410 411 /* 412 * Replace item expected in xarray by a new item, while holding xa_lock. 413 */ 414 static int shmem_replace_entry(struct address_space *mapping, 415 pgoff_t index, void *expected, void *replacement) 416 { 417 XA_STATE(xas, &mapping->i_pages, index); 418 void *item; 419 420 VM_BUG_ON(!expected); 421 VM_BUG_ON(!replacement); 422 item = xas_load(&xas); 423 if (item != expected) 424 return -ENOENT; 425 xas_store(&xas, replacement); 426 return 0; 427 } 428 429 /* 430 * Sometimes, before we decide whether to proceed or to fail, we must check 431 * that an entry was not already brought back from swap by a racing thread. 432 * 433 * Checking page is not enough: by the time a SwapCache page is locked, it 434 * might be reused, and again be SwapCache, using the same swap as before. 435 */ 436 static bool shmem_confirm_swap(struct address_space *mapping, 437 pgoff_t index, swp_entry_t swap) 438 { 439 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); 440 } 441 442 /* 443 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option 444 * 445 * SHMEM_HUGE_NEVER: 446 * disables huge pages for the mount; 447 * SHMEM_HUGE_ALWAYS: 448 * enables huge pages for the mount; 449 * SHMEM_HUGE_WITHIN_SIZE: 450 * only allocate huge pages if the page will be fully within i_size, 451 * also respect fadvise()/madvise() hints; 452 * SHMEM_HUGE_ADVISE: 453 * only allocate huge pages if requested with fadvise()/madvise(); 454 */ 455 456 #define SHMEM_HUGE_NEVER 0 457 #define SHMEM_HUGE_ALWAYS 1 458 #define SHMEM_HUGE_WITHIN_SIZE 2 459 #define SHMEM_HUGE_ADVISE 3 460 461 /* 462 * Special values. 
463 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled: 464 * 465 * SHMEM_HUGE_DENY: 466 * disables huge on shm_mnt and all mounts, for emergency use; 467 * SHMEM_HUGE_FORCE: 468 * enables huge on shm_mnt and all mounts, w/o needing option, for testing; 469 * 470 */ 471 #define SHMEM_HUGE_DENY (-1) 472 #define SHMEM_HUGE_FORCE (-2) 473 474 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 475 /* ifdef here to avoid bloating shmem.o when not necessary */ 476 477 static int shmem_huge __read_mostly; 478 479 #if defined(CONFIG_SYSFS) 480 static int shmem_parse_huge(const char *str) 481 { 482 if (!strcmp(str, "never")) 483 return SHMEM_HUGE_NEVER; 484 if (!strcmp(str, "always")) 485 return SHMEM_HUGE_ALWAYS; 486 if (!strcmp(str, "within_size")) 487 return SHMEM_HUGE_WITHIN_SIZE; 488 if (!strcmp(str, "advise")) 489 return SHMEM_HUGE_ADVISE; 490 if (!strcmp(str, "deny")) 491 return SHMEM_HUGE_DENY; 492 if (!strcmp(str, "force")) 493 return SHMEM_HUGE_FORCE; 494 return -EINVAL; 495 } 496 #endif 497 498 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) 499 static const char *shmem_format_huge(int huge) 500 { 501 switch (huge) { 502 case SHMEM_HUGE_NEVER: 503 return "never"; 504 case SHMEM_HUGE_ALWAYS: 505 return "always"; 506 case SHMEM_HUGE_WITHIN_SIZE: 507 return "within_size"; 508 case SHMEM_HUGE_ADVISE: 509 return "advise"; 510 case SHMEM_HUGE_DENY: 511 return "deny"; 512 case SHMEM_HUGE_FORCE: 513 return "force"; 514 default: 515 VM_BUG_ON(1); 516 return "bad_val"; 517 } 518 } 519 #endif 520 521 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 522 struct shrink_control *sc, unsigned long nr_to_split) 523 { 524 LIST_HEAD(list), *pos, *next; 525 LIST_HEAD(to_remove); 526 struct inode *inode; 527 struct shmem_inode_info *info; 528 struct page *page; 529 unsigned long batch = sc ? sc->nr_to_scan : 128; 530 int removed = 0, split = 0; 531 532 if (list_empty(&sbinfo->shrinklist)) 533 return SHRINK_STOP; 534 535 spin_lock(&sbinfo->shrinklist_lock); 536 list_for_each_safe(pos, next, &sbinfo->shrinklist) { 537 info = list_entry(pos, struct shmem_inode_info, shrinklist); 538 539 /* pin the inode */ 540 inode = igrab(&info->vfs_inode); 541 542 /* inode is about to be evicted */ 543 if (!inode) { 544 list_del_init(&info->shrinklist); 545 removed++; 546 goto next; 547 } 548 549 /* Check if there's anything to gain */ 550 if (round_up(inode->i_size, PAGE_SIZE) == 551 round_up(inode->i_size, HPAGE_PMD_SIZE)) { 552 list_move(&info->shrinklist, &to_remove); 553 removed++; 554 goto next; 555 } 556 557 list_move(&info->shrinklist, &list); 558 next: 559 if (!--batch) 560 break; 561 } 562 spin_unlock(&sbinfo->shrinklist_lock); 563 564 list_for_each_safe(pos, next, &to_remove) { 565 info = list_entry(pos, struct shmem_inode_info, shrinklist); 566 inode = &info->vfs_inode; 567 list_del_init(&info->shrinklist); 568 iput(inode); 569 } 570 571 list_for_each_safe(pos, next, &list) { 572 int ret; 573 574 info = list_entry(pos, struct shmem_inode_info, shrinklist); 575 inode = &info->vfs_inode; 576 577 if (nr_to_split && split >= nr_to_split) 578 goto leave; 579 580 page = find_get_page(inode->i_mapping, 581 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); 582 if (!page) 583 goto drop; 584 585 /* No huge page at the end of the file: nothing to split */ 586 if (!PageTransHuge(page)) { 587 put_page(page); 588 goto drop; 589 } 590 591 /* 592 * Leave the inode on the list if we failed to lock 593 * the page at this time. 
594 * 595 * Waiting for the lock may lead to deadlock in the 596 * reclaim path. 597 */ 598 if (!trylock_page(page)) { 599 put_page(page); 600 goto leave; 601 } 602 603 ret = split_huge_page(page); 604 unlock_page(page); 605 put_page(page); 606 607 /* If split failed leave the inode on the list */ 608 if (ret) 609 goto leave; 610 611 split++; 612 drop: 613 list_del_init(&info->shrinklist); 614 removed++; 615 leave: 616 iput(inode); 617 } 618 619 spin_lock(&sbinfo->shrinklist_lock); 620 list_splice_tail(&list, &sbinfo->shrinklist); 621 sbinfo->shrinklist_len -= removed; 622 spin_unlock(&sbinfo->shrinklist_lock); 623 624 return split; 625 } 626 627 static long shmem_unused_huge_scan(struct super_block *sb, 628 struct shrink_control *sc) 629 { 630 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 631 632 if (!READ_ONCE(sbinfo->shrinklist_len)) 633 return SHRINK_STOP; 634 635 return shmem_unused_huge_shrink(sbinfo, sc, 0); 636 } 637 638 static long shmem_unused_huge_count(struct super_block *sb, 639 struct shrink_control *sc) 640 { 641 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 642 return READ_ONCE(sbinfo->shrinklist_len); 643 } 644 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 645 646 #define shmem_huge SHMEM_HUGE_DENY 647 648 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 649 struct shrink_control *sc, unsigned long nr_to_split) 650 { 651 return 0; 652 } 653 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 654 655 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) 656 { 657 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 658 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && 659 shmem_huge != SHMEM_HUGE_DENY) 660 return true; 661 return false; 662 } 663 664 /* 665 * Like add_to_page_cache_locked, but error if expected item has gone. 
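 *
 * "expected" is whatever the caller last saw at that index: NULL when adding
 * a freshly allocated page, or the swp_to_radix_entry() value when moving a
 * page back in from swap.  If something else is found there now, another
 * thread got in first and we back out with -EEXIST.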
666 */ 667 static int shmem_add_to_page_cache(struct page *page, 668 struct address_space *mapping, 669 pgoff_t index, void *expected, gfp_t gfp, 670 struct mm_struct *charge_mm) 671 { 672 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); 673 unsigned long i = 0; 674 unsigned long nr = compound_nr(page); 675 int error; 676 677 VM_BUG_ON_PAGE(PageTail(page), page); 678 VM_BUG_ON_PAGE(index != round_down(index, nr), page); 679 VM_BUG_ON_PAGE(!PageLocked(page), page); 680 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 681 VM_BUG_ON(expected && PageTransHuge(page)); 682 683 page_ref_add(page, nr); 684 page->mapping = mapping; 685 page->index = index; 686 687 if (!PageSwapCache(page)) { 688 error = mem_cgroup_charge(page, charge_mm, gfp); 689 if (error) { 690 if (PageTransHuge(page)) { 691 count_vm_event(THP_FILE_FALLBACK); 692 count_vm_event(THP_FILE_FALLBACK_CHARGE); 693 } 694 goto error; 695 } 696 } 697 cgroup_throttle_swaprate(page, gfp); 698 699 do { 700 void *entry; 701 xas_lock_irq(&xas); 702 entry = xas_find_conflict(&xas); 703 if (entry != expected) 704 xas_set_err(&xas, -EEXIST); 705 xas_create_range(&xas); 706 if (xas_error(&xas)) 707 goto unlock; 708 next: 709 xas_store(&xas, page); 710 if (++i < nr) { 711 xas_next(&xas); 712 goto next; 713 } 714 if (PageTransHuge(page)) { 715 count_vm_event(THP_FILE_ALLOC); 716 __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr); 717 } 718 mapping->nrpages += nr; 719 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr); 720 __mod_lruvec_page_state(page, NR_SHMEM, nr); 721 unlock: 722 xas_unlock_irq(&xas); 723 } while (xas_nomem(&xas, gfp)); 724 725 if (xas_error(&xas)) { 726 error = xas_error(&xas); 727 goto error; 728 } 729 730 return 0; 731 error: 732 page->mapping = NULL; 733 page_ref_sub(page, nr); 734 return error; 735 } 736 737 /* 738 * Like delete_from_page_cache, but substitutes swap for page. 739 */ 740 static void shmem_delete_from_page_cache(struct page *page, void *radswap) 741 { 742 struct address_space *mapping = page->mapping; 743 int error; 744 745 VM_BUG_ON_PAGE(PageCompound(page), page); 746 747 xa_lock_irq(&mapping->i_pages); 748 error = shmem_replace_entry(mapping, page->index, page, radswap); 749 page->mapping = NULL; 750 mapping->nrpages--; 751 __dec_lruvec_page_state(page, NR_FILE_PAGES); 752 __dec_lruvec_page_state(page, NR_SHMEM); 753 xa_unlock_irq(&mapping->i_pages); 754 put_page(page); 755 BUG_ON(error); 756 } 757 758 /* 759 * Remove swap entry from page cache, free the swap and its page cache. 760 */ 761 static int shmem_free_swap(struct address_space *mapping, 762 pgoff_t index, void *radswap) 763 { 764 void *old; 765 766 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); 767 if (old != radswap) 768 return -ENOENT; 769 free_swap_and_cache(radix_to_swp_entry(radswap)); 770 return 0; 771 } 772 773 /* 774 * Determine (in bytes) how many of the shmem object's pages mapped by the 775 * given offsets are swapped out. 776 * 777 * This is safe to call without i_mutex or the i_pages lock thanks to RCU, 778 * as long as the inode doesn't go away and racy results are not a problem. 
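 *
 * The typical consumer is /proc/<pid>/smaps, via shmem_swap_usage() below,
 * where an approximate "Swap:" figure for a shmem mapping is good enough.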
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, &index))
			break;
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock the
 * head page, and then succeeded in trylocking a tail page.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
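 *
 * An illustrative calculation with 4kB pages: punching lstart=512, lend=8191
 * gives partial_start=512, so bytes 512..4095 of page 0 are zeroed in place;
 * start=1 and end=2, so only page 1 is removed outright; and partial_end=0,
 * since the hole happens to end on a page boundary.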
889 */ 890 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, 891 bool unfalloc) 892 { 893 struct address_space *mapping = inode->i_mapping; 894 struct shmem_inode_info *info = SHMEM_I(inode); 895 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; 896 pgoff_t end = (lend + 1) >> PAGE_SHIFT; 897 unsigned int partial_start = lstart & (PAGE_SIZE - 1); 898 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1); 899 struct pagevec pvec; 900 pgoff_t indices[PAGEVEC_SIZE]; 901 long nr_swaps_freed = 0; 902 pgoff_t index; 903 int i; 904 905 if (lend == -1) 906 end = -1; /* unsigned, so actually very big */ 907 908 pagevec_init(&pvec); 909 index = start; 910 while (index < end && find_lock_entries(mapping, index, end - 1, 911 &pvec, indices)) { 912 for (i = 0; i < pagevec_count(&pvec); i++) { 913 struct page *page = pvec.pages[i]; 914 915 index = indices[i]; 916 917 if (xa_is_value(page)) { 918 if (unfalloc) 919 continue; 920 nr_swaps_freed += !shmem_free_swap(mapping, 921 index, page); 922 continue; 923 } 924 index += thp_nr_pages(page) - 1; 925 926 if (!unfalloc || !PageUptodate(page)) 927 truncate_inode_page(mapping, page); 928 unlock_page(page); 929 } 930 pagevec_remove_exceptionals(&pvec); 931 pagevec_release(&pvec); 932 cond_resched(); 933 index++; 934 } 935 936 if (partial_start) { 937 struct page *page = NULL; 938 shmem_getpage(inode, start - 1, &page, SGP_READ); 939 if (page) { 940 unsigned int top = PAGE_SIZE; 941 if (start > end) { 942 top = partial_end; 943 partial_end = 0; 944 } 945 zero_user_segment(page, partial_start, top); 946 set_page_dirty(page); 947 unlock_page(page); 948 put_page(page); 949 } 950 } 951 if (partial_end) { 952 struct page *page = NULL; 953 shmem_getpage(inode, end, &page, SGP_READ); 954 if (page) { 955 zero_user_segment(page, 0, partial_end); 956 set_page_dirty(page); 957 unlock_page(page); 958 put_page(page); 959 } 960 } 961 if (start >= end) 962 return; 963 964 index = start; 965 while (index < end) { 966 cond_resched(); 967 968 if (!find_get_entries(mapping, index, end - 1, &pvec, 969 indices)) { 970 /* If all gone or hole-punch or unfalloc, we're done */ 971 if (index == start || end != -1) 972 break; 973 /* But if truncating, restart to make sure all gone */ 974 index = start; 975 continue; 976 } 977 for (i = 0; i < pagevec_count(&pvec); i++) { 978 struct page *page = pvec.pages[i]; 979 980 index = indices[i]; 981 if (xa_is_value(page)) { 982 if (unfalloc) 983 continue; 984 if (shmem_free_swap(mapping, index, page)) { 985 /* Swap was replaced by page: retry */ 986 index--; 987 break; 988 } 989 nr_swaps_freed++; 990 continue; 991 } 992 993 lock_page(page); 994 995 if (!unfalloc || !PageUptodate(page)) { 996 if (page_mapping(page) != mapping) { 997 /* Page was replaced by swap: retry */ 998 unlock_page(page); 999 index--; 1000 break; 1001 } 1002 VM_BUG_ON_PAGE(PageWriteback(page), page); 1003 if (shmem_punch_compound(page, start, end)) 1004 truncate_inode_page(mapping, page); 1005 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1006 /* Wipe the page and don't get stuck */ 1007 clear_highpage(page); 1008 flush_dcache_page(page); 1009 set_page_dirty(page); 1010 if (index < 1011 round_up(start, HPAGE_PMD_NR)) 1012 start = index + 1; 1013 } 1014 } 1015 unlock_page(page); 1016 } 1017 pagevec_remove_exceptionals(&pvec); 1018 pagevec_release(&pvec); 1019 index++; 1020 } 1021 1022 spin_lock_irq(&info->lock); 1023 info->swapped -= nr_swaps_freed; 1024 shmem_recalc_inode(inode); 1025 spin_unlock_irq(&info->lock); 1026 } 1027 1028 void 
shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 1029 { 1030 shmem_undo_range(inode, lstart, lend, false); 1031 inode->i_ctime = inode->i_mtime = current_time(inode); 1032 } 1033 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1034 1035 static int shmem_getattr(struct user_namespace *mnt_userns, 1036 const struct path *path, struct kstat *stat, 1037 u32 request_mask, unsigned int query_flags) 1038 { 1039 struct inode *inode = path->dentry->d_inode; 1040 struct shmem_inode_info *info = SHMEM_I(inode); 1041 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb); 1042 1043 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { 1044 spin_lock_irq(&info->lock); 1045 shmem_recalc_inode(inode); 1046 spin_unlock_irq(&info->lock); 1047 } 1048 generic_fillattr(&init_user_ns, inode, stat); 1049 1050 if (is_huge_enabled(sb_info)) 1051 stat->blksize = HPAGE_PMD_SIZE; 1052 1053 return 0; 1054 } 1055 1056 static int shmem_setattr(struct user_namespace *mnt_userns, 1057 struct dentry *dentry, struct iattr *attr) 1058 { 1059 struct inode *inode = d_inode(dentry); 1060 struct shmem_inode_info *info = SHMEM_I(inode); 1061 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1062 int error; 1063 1064 error = setattr_prepare(&init_user_ns, dentry, attr); 1065 if (error) 1066 return error; 1067 1068 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1069 loff_t oldsize = inode->i_size; 1070 loff_t newsize = attr->ia_size; 1071 1072 /* protected by i_mutex */ 1073 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1074 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1075 return -EPERM; 1076 1077 if (newsize != oldsize) { 1078 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1079 oldsize, newsize); 1080 if (error) 1081 return error; 1082 i_size_write(inode, newsize); 1083 inode->i_ctime = inode->i_mtime = current_time(inode); 1084 } 1085 if (newsize <= oldsize) { 1086 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1087 if (oldsize > holebegin) 1088 unmap_mapping_range(inode->i_mapping, 1089 holebegin, 0, 1); 1090 if (info->alloced) 1091 shmem_truncate_range(inode, 1092 newsize, (loff_t)-1); 1093 /* unmap again to remove racily COWed private pages */ 1094 if (oldsize > holebegin) 1095 unmap_mapping_range(inode->i_mapping, 1096 holebegin, 0, 1); 1097 1098 /* 1099 * Part of the huge page can be beyond i_size: subject 1100 * to shrink under memory pressure. 
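			 * (For instance, truncating down to 5MB leaves the
			 * tail of the 4MB-6MB huge page sticking out beyond
			 * i_size; parking the inode on the shrinklist lets
			 * shmem_unused_huge_shrink() split that page and give
			 * the excess back when memory gets tight.)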
1101 */ 1102 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1103 spin_lock(&sbinfo->shrinklist_lock); 1104 /* 1105 * _careful to defend against unlocked access to 1106 * ->shrink_list in shmem_unused_huge_shrink() 1107 */ 1108 if (list_empty_careful(&info->shrinklist)) { 1109 list_add_tail(&info->shrinklist, 1110 &sbinfo->shrinklist); 1111 sbinfo->shrinklist_len++; 1112 } 1113 spin_unlock(&sbinfo->shrinklist_lock); 1114 } 1115 } 1116 } 1117 1118 setattr_copy(&init_user_ns, inode, attr); 1119 if (attr->ia_valid & ATTR_MODE) 1120 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); 1121 return error; 1122 } 1123 1124 static void shmem_evict_inode(struct inode *inode) 1125 { 1126 struct shmem_inode_info *info = SHMEM_I(inode); 1127 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1128 1129 if (shmem_mapping(inode->i_mapping)) { 1130 shmem_unacct_size(info->flags, inode->i_size); 1131 inode->i_size = 0; 1132 shmem_truncate_range(inode, 0, (loff_t)-1); 1133 if (!list_empty(&info->shrinklist)) { 1134 spin_lock(&sbinfo->shrinklist_lock); 1135 if (!list_empty(&info->shrinklist)) { 1136 list_del_init(&info->shrinklist); 1137 sbinfo->shrinklist_len--; 1138 } 1139 spin_unlock(&sbinfo->shrinklist_lock); 1140 } 1141 while (!list_empty(&info->swaplist)) { 1142 /* Wait while shmem_unuse() is scanning this inode... */ 1143 wait_var_event(&info->stop_eviction, 1144 !atomic_read(&info->stop_eviction)); 1145 mutex_lock(&shmem_swaplist_mutex); 1146 /* ...but beware of the race if we peeked too early */ 1147 if (!atomic_read(&info->stop_eviction)) 1148 list_del_init(&info->swaplist); 1149 mutex_unlock(&shmem_swaplist_mutex); 1150 } 1151 } 1152 1153 simple_xattrs_free(&info->xattrs); 1154 WARN_ON(inode->i_blocks); 1155 shmem_free_inode(inode->i_sb); 1156 clear_inode(inode); 1157 } 1158 1159 extern struct swap_info_struct *swap_info[]; 1160 1161 static int shmem_find_swap_entries(struct address_space *mapping, 1162 pgoff_t start, unsigned int nr_entries, 1163 struct page **entries, pgoff_t *indices, 1164 unsigned int type, bool frontswap) 1165 { 1166 XA_STATE(xas, &mapping->i_pages, start); 1167 struct page *page; 1168 swp_entry_t entry; 1169 unsigned int ret = 0; 1170 1171 if (!nr_entries) 1172 return 0; 1173 1174 rcu_read_lock(); 1175 xas_for_each(&xas, page, ULONG_MAX) { 1176 if (xas_retry(&xas, page)) 1177 continue; 1178 1179 if (!xa_is_value(page)) 1180 continue; 1181 1182 entry = radix_to_swp_entry(page); 1183 if (swp_type(entry) != type) 1184 continue; 1185 if (frontswap && 1186 !frontswap_test(swap_info[type], swp_offset(entry))) 1187 continue; 1188 1189 indices[ret] = xas.xa_index; 1190 entries[ret] = page; 1191 1192 if (need_resched()) { 1193 xas_pause(&xas); 1194 cond_resched_rcu(); 1195 } 1196 if (++ret == nr_entries) 1197 break; 1198 } 1199 rcu_read_unlock(); 1200 1201 return ret; 1202 } 1203 1204 /* 1205 * Move the swapped pages for an inode to page cache. Returns the count 1206 * of pages swapped in, or the error in case of failure. 
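 *
 * This runs in the swapoff path: try_to_unuse() calls shmem_unuse(), which
 * walks shmem_swaplist and hands each interesting inode to
 * shmem_unuse_inode(), which in turn feeds the swap entries it finds back
 * here, one pagevec at a time.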
1207 */ 1208 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1209 pgoff_t *indices) 1210 { 1211 int i = 0; 1212 int ret = 0; 1213 int error = 0; 1214 struct address_space *mapping = inode->i_mapping; 1215 1216 for (i = 0; i < pvec.nr; i++) { 1217 struct page *page = pvec.pages[i]; 1218 1219 if (!xa_is_value(page)) 1220 continue; 1221 error = shmem_swapin_page(inode, indices[i], 1222 &page, SGP_CACHE, 1223 mapping_gfp_mask(mapping), 1224 NULL, NULL); 1225 if (error == 0) { 1226 unlock_page(page); 1227 put_page(page); 1228 ret++; 1229 } 1230 if (error == -ENOMEM) 1231 break; 1232 error = 0; 1233 } 1234 return error ? error : ret; 1235 } 1236 1237 /* 1238 * If swap found in inode, free it and move page from swapcache to filecache. 1239 */ 1240 static int shmem_unuse_inode(struct inode *inode, unsigned int type, 1241 bool frontswap, unsigned long *fs_pages_to_unuse) 1242 { 1243 struct address_space *mapping = inode->i_mapping; 1244 pgoff_t start = 0; 1245 struct pagevec pvec; 1246 pgoff_t indices[PAGEVEC_SIZE]; 1247 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0); 1248 int ret = 0; 1249 1250 pagevec_init(&pvec); 1251 do { 1252 unsigned int nr_entries = PAGEVEC_SIZE; 1253 1254 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE) 1255 nr_entries = *fs_pages_to_unuse; 1256 1257 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, 1258 pvec.pages, indices, 1259 type, frontswap); 1260 if (pvec.nr == 0) { 1261 ret = 0; 1262 break; 1263 } 1264 1265 ret = shmem_unuse_swap_entries(inode, pvec, indices); 1266 if (ret < 0) 1267 break; 1268 1269 if (frontswap_partial) { 1270 *fs_pages_to_unuse -= ret; 1271 if (*fs_pages_to_unuse == 0) { 1272 ret = FRONTSWAP_PAGES_UNUSED; 1273 break; 1274 } 1275 } 1276 1277 start = indices[pvec.nr - 1]; 1278 } while (true); 1279 1280 return ret; 1281 } 1282 1283 /* 1284 * Read all the shared memory data that resides in the swap 1285 * device 'type' back into memory, so the swap device can be 1286 * unused. 1287 */ 1288 int shmem_unuse(unsigned int type, bool frontswap, 1289 unsigned long *fs_pages_to_unuse) 1290 { 1291 struct shmem_inode_info *info, *next; 1292 int error = 0; 1293 1294 if (list_empty(&shmem_swaplist)) 1295 return 0; 1296 1297 mutex_lock(&shmem_swaplist_mutex); 1298 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1299 if (!info->swapped) { 1300 list_del_init(&info->swaplist); 1301 continue; 1302 } 1303 /* 1304 * Drop the swaplist mutex while searching the inode for swap; 1305 * but before doing so, make sure shmem_evict_inode() will not 1306 * remove placeholder inode from swaplist, nor let it be freed 1307 * (igrab() would protect from unlink, but not from unmount). 1308 */ 1309 atomic_inc(&info->stop_eviction); 1310 mutex_unlock(&shmem_swaplist_mutex); 1311 1312 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, 1313 fs_pages_to_unuse); 1314 cond_resched(); 1315 1316 mutex_lock(&shmem_swaplist_mutex); 1317 next = list_next_entry(info, swaplist); 1318 if (!info->swapped) 1319 list_del_init(&info->swaplist); 1320 if (atomic_dec_and_test(&info->stop_eviction)) 1321 wake_up_var(&info->stop_eviction); 1322 if (error) 1323 break; 1324 } 1325 mutex_unlock(&shmem_swaplist_mutex); 1326 1327 return error; 1328 } 1329 1330 /* 1331 * Move the page from the page cache to the swap cache. 
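 *
 * This is only expected to be reached from memory reclaim (pageout() with
 * wbc->for_reclaim set), never from ordinary writeback or sync; the check
 * below insists on it.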
1332 */ 1333 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1334 { 1335 struct shmem_inode_info *info; 1336 struct address_space *mapping; 1337 struct inode *inode; 1338 swp_entry_t swap; 1339 pgoff_t index; 1340 1341 VM_BUG_ON_PAGE(PageCompound(page), page); 1342 BUG_ON(!PageLocked(page)); 1343 mapping = page->mapping; 1344 index = page->index; 1345 inode = mapping->host; 1346 info = SHMEM_I(inode); 1347 if (info->flags & VM_LOCKED) 1348 goto redirty; 1349 if (!total_swap_pages) 1350 goto redirty; 1351 1352 /* 1353 * Our capabilities prevent regular writeback or sync from ever calling 1354 * shmem_writepage; but a stacking filesystem might use ->writepage of 1355 * its underlying filesystem, in which case tmpfs should write out to 1356 * swap only in response to memory pressure, and not for the writeback 1357 * threads or sync. 1358 */ 1359 if (!wbc->for_reclaim) { 1360 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 1361 goto redirty; 1362 } 1363 1364 /* 1365 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1366 * value into swapfile.c, the only way we can correctly account for a 1367 * fallocated page arriving here is now to initialize it and write it. 1368 * 1369 * That's okay for a page already fallocated earlier, but if we have 1370 * not yet completed the fallocation, then (a) we want to keep track 1371 * of this page in case we have to undo it, and (b) it may not be a 1372 * good idea to continue anyway, once we're pushing into swap. So 1373 * reactivate the page, and let shmem_fallocate() quit when too many. 1374 */ 1375 if (!PageUptodate(page)) { 1376 if (inode->i_private) { 1377 struct shmem_falloc *shmem_falloc; 1378 spin_lock(&inode->i_lock); 1379 shmem_falloc = inode->i_private; 1380 if (shmem_falloc && 1381 !shmem_falloc->waitq && 1382 index >= shmem_falloc->start && 1383 index < shmem_falloc->next) 1384 shmem_falloc->nr_unswapped++; 1385 else 1386 shmem_falloc = NULL; 1387 spin_unlock(&inode->i_lock); 1388 if (shmem_falloc) 1389 goto redirty; 1390 } 1391 clear_highpage(page); 1392 flush_dcache_page(page); 1393 SetPageUptodate(page); 1394 } 1395 1396 swap = get_swap_page(page); 1397 if (!swap.val) 1398 goto redirty; 1399 1400 /* 1401 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1402 * if it's not already there. Do it now before the page is 1403 * moved to swap cache, when its pagelock no longer protects 1404 * the inode from eviction. But don't unlock the mutex until 1405 * we've incremented swapped, because shmem_unuse_inode() will 1406 * prune a !swapped inode from the swaplist under this mutex. 
1407 */ 1408 mutex_lock(&shmem_swaplist_mutex); 1409 if (list_empty(&info->swaplist)) 1410 list_add(&info->swaplist, &shmem_swaplist); 1411 1412 if (add_to_swap_cache(page, swap, 1413 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 1414 NULL) == 0) { 1415 spin_lock_irq(&info->lock); 1416 shmem_recalc_inode(inode); 1417 info->swapped++; 1418 spin_unlock_irq(&info->lock); 1419 1420 swap_shmem_alloc(swap); 1421 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 1422 1423 mutex_unlock(&shmem_swaplist_mutex); 1424 BUG_ON(page_mapped(page)); 1425 swap_writepage(page, wbc); 1426 return 0; 1427 } 1428 1429 mutex_unlock(&shmem_swaplist_mutex); 1430 put_swap_page(page, swap); 1431 redirty: 1432 set_page_dirty(page); 1433 if (wbc->for_reclaim) 1434 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1435 unlock_page(page); 1436 return 0; 1437 } 1438 1439 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1440 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1441 { 1442 char buffer[64]; 1443 1444 if (!mpol || mpol->mode == MPOL_DEFAULT) 1445 return; /* show nothing */ 1446 1447 mpol_to_str(buffer, sizeof(buffer), mpol); 1448 1449 seq_printf(seq, ",mpol=%s", buffer); 1450 } 1451 1452 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1453 { 1454 struct mempolicy *mpol = NULL; 1455 if (sbinfo->mpol) { 1456 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1457 mpol = sbinfo->mpol; 1458 mpol_get(mpol); 1459 spin_unlock(&sbinfo->stat_lock); 1460 } 1461 return mpol; 1462 } 1463 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1464 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1465 { 1466 } 1467 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1468 { 1469 return NULL; 1470 } 1471 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1472 #ifndef CONFIG_NUMA 1473 #define vm_policy vm_private_data 1474 #endif 1475 1476 static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1477 struct shmem_inode_info *info, pgoff_t index) 1478 { 1479 /* Create a pseudo vma that just contains the policy */ 1480 vma_init(vma, NULL); 1481 /* Bias interleave by inode number to distribute better across nodes */ 1482 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1483 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1484 } 1485 1486 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1487 { 1488 /* Drop reference taken by mpol_shared_policy_lookup() */ 1489 mpol_cond_put(vma->vm_policy); 1490 } 1491 1492 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 1493 struct shmem_inode_info *info, pgoff_t index) 1494 { 1495 struct vm_area_struct pvma; 1496 struct page *page; 1497 struct vm_fault vmf = { 1498 .vma = &pvma, 1499 }; 1500 1501 shmem_pseudo_vma_init(&pvma, info, index); 1502 page = swap_cluster_readahead(swap, gfp, &vmf); 1503 shmem_pseudo_vma_destroy(&pvma); 1504 1505 return page; 1506 } 1507 1508 /* 1509 * Make sure huge_gfp is always more limited than limit_gfp. 1510 * Some of the flags set permissions, while others set limitations. 1511 */ 1512 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 1513 { 1514 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 1515 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1516 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1517 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1518 1519 /* Allow allocations only from the originally specified zones. 
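	 * (That is, the zone modifier bits in the result come entirely from
	 * limit_gfp, so a huge allocation can never reach a zone that the
	 * original, non-huge allocation would not have been allowed to use.)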
*/ 1520 result |= zoneflags; 1521 1522 /* 1523 * Minimize the result gfp by taking the union with the deny flags, 1524 * and the intersection of the allow flags. 1525 */ 1526 result |= (limit_gfp & denyflags); 1527 result |= (huge_gfp & limit_gfp) & allowflags; 1528 1529 return result; 1530 } 1531 1532 static struct page *shmem_alloc_hugepage(gfp_t gfp, 1533 struct shmem_inode_info *info, pgoff_t index) 1534 { 1535 struct vm_area_struct pvma; 1536 struct address_space *mapping = info->vfs_inode.i_mapping; 1537 pgoff_t hindex; 1538 struct page *page; 1539 1540 hindex = round_down(index, HPAGE_PMD_NR); 1541 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 1542 XA_PRESENT)) 1543 return NULL; 1544 1545 shmem_pseudo_vma_init(&pvma, info, hindex); 1546 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), 1547 true); 1548 shmem_pseudo_vma_destroy(&pvma); 1549 if (page) 1550 prep_transhuge_page(page); 1551 else 1552 count_vm_event(THP_FILE_FALLBACK); 1553 return page; 1554 } 1555 1556 static struct page *shmem_alloc_page(gfp_t gfp, 1557 struct shmem_inode_info *info, pgoff_t index) 1558 { 1559 struct vm_area_struct pvma; 1560 struct page *page; 1561 1562 shmem_pseudo_vma_init(&pvma, info, index); 1563 page = alloc_page_vma(gfp, &pvma, 0); 1564 shmem_pseudo_vma_destroy(&pvma); 1565 1566 return page; 1567 } 1568 1569 static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1570 struct inode *inode, 1571 pgoff_t index, bool huge) 1572 { 1573 struct shmem_inode_info *info = SHMEM_I(inode); 1574 struct page *page; 1575 int nr; 1576 int err = -ENOSPC; 1577 1578 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1579 huge = false; 1580 nr = huge ? HPAGE_PMD_NR : 1; 1581 1582 if (!shmem_inode_acct_block(inode, nr)) 1583 goto failed; 1584 1585 if (huge) 1586 page = shmem_alloc_hugepage(gfp, info, index); 1587 else 1588 page = shmem_alloc_page(gfp, info, index); 1589 if (page) { 1590 __SetPageLocked(page); 1591 __SetPageSwapBacked(page); 1592 return page; 1593 } 1594 1595 err = -ENOMEM; 1596 shmem_inode_unacct_blocks(inode, nr); 1597 failed: 1598 return ERR_PTR(err); 1599 } 1600 1601 /* 1602 * When a page is moved from swapcache to shmem filecache (either by the 1603 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1604 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1605 * ignorance of the mapping it belongs to. If that mapping has special 1606 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1607 * we may need to copy to a suitable page before moving to filecache. 1608 * 1609 * In a future release, this may well be extended to respect cpuset and 1610 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1611 * but for now it is a simple matter of zone. 1612 */ 1613 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1614 { 1615 return page_zonenum(page) > gfp_zone(gfp); 1616 } 1617 1618 static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1619 struct shmem_inode_info *info, pgoff_t index) 1620 { 1621 struct page *oldpage, *newpage; 1622 struct address_space *swap_mapping; 1623 swp_entry_t entry; 1624 pgoff_t swap_index; 1625 int error; 1626 1627 oldpage = *pagep; 1628 entry.val = page_private(oldpage); 1629 swap_index = swp_offset(entry); 1630 swap_mapping = page_mapping(oldpage); 1631 1632 /* 1633 * We have arrived here because our zones are constrained, so don't 1634 * limit chance of success by further cpuset and node constraints. 
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	get_page(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__SetPageLocked(newpage);
	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, entry.val);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	xa_lock_irq(&swap_mapping->i_pages);
	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
	if (!error) {
		mem_cgroup_migrate(oldpage, newpage);
		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
	}
	xa_unlock_irq(&swap_mapping->i_pages);

	if (unlikely(error)) {
		/*
		 * Is this possible? I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive. Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		lru_cache_add(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	put_page(oldpage);
	put_page(oldpage);
	return error;
}

/*
 * Swap in the page pointed to by *pagep.
 * Caller has to make sure that *pagep contains a valid swapped page.
 * Returns 0 and the page in *pagep on success. On failure, returns the
 * error code and NULL in *pagep.
 */
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
	struct swap_info_struct *si;
	struct page *page = NULL;
	swp_entry_t swap;
	int error;

	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
	swap = radix_to_swp_entry(*pagep);
	*pagep = NULL;

	/* Prevent swapoff from happening to us. */
	si = get_swap_device(swap);
	if (!si) {
		error = -EINVAL;
		goto failed;
	}
	/* Look it up and read it in.. */
	page = lookup_swap_cache(swap, NULL, 0);
	if (!page) {
		/* Or update major stats only when swapin succeeds?? */
		if (fault_type) {
			*fault_type |= VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(charge_mm, PGMAJFAULT);
		}
		/* Here we actually start the io */
		page = shmem_swapin(swap, gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto failed;
		}
	}

	/* We have to do this with page locked to prevent races */
	lock_page(page);
	if (!PageSwapCache(page) || page_private(page) != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap)) {
		error = -EEXIST;
		goto unlock;
	}
	if (!PageUptodate(page)) {
		error = -EIO;
		goto failed;
	}
	wait_on_page_writeback(page);

	/*
	 * Some architectures may have to restore extra metadata to the
	 * physical page after reading from swap.
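	 * (arm64 MTE is the usual example: memory tags saved when the page
	 * went out to swap are restored here before it can be mapped again.)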
1747 */ 1748 arch_swap_restore(swap, page); 1749 1750 if (shmem_should_replace_page(page, gfp)) { 1751 error = shmem_replace_page(&page, gfp, info, index); 1752 if (error) 1753 goto failed; 1754 } 1755 1756 error = shmem_add_to_page_cache(page, mapping, index, 1757 swp_to_radix_entry(swap), gfp, 1758 charge_mm); 1759 if (error) 1760 goto failed; 1761 1762 spin_lock_irq(&info->lock); 1763 info->swapped--; 1764 shmem_recalc_inode(inode); 1765 spin_unlock_irq(&info->lock); 1766 1767 if (sgp == SGP_WRITE) 1768 mark_page_accessed(page); 1769 1770 delete_from_swap_cache(page); 1771 set_page_dirty(page); 1772 swap_free(swap); 1773 1774 *pagep = page; 1775 if (si) 1776 put_swap_device(si); 1777 return 0; 1778 failed: 1779 if (!shmem_confirm_swap(mapping, index, swap)) 1780 error = -EEXIST; 1781 unlock: 1782 if (page) { 1783 unlock_page(page); 1784 put_page(page); 1785 } 1786 1787 if (si) 1788 put_swap_device(si); 1789 1790 return error; 1791 } 1792 1793 /* 1794 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1795 * 1796 * If we allocate a new one we do not mark it dirty. That's up to the 1797 * vm. If we swap it in we mark it dirty since we also free the swap 1798 * entry since a page cannot live in both the swap and page cache. 1799 * 1800 * vma, vmf, and fault_type are only supplied by shmem_fault: 1801 * otherwise they are NULL. 1802 */ 1803 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1804 struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1805 struct vm_area_struct *vma, struct vm_fault *vmf, 1806 vm_fault_t *fault_type) 1807 { 1808 struct address_space *mapping = inode->i_mapping; 1809 struct shmem_inode_info *info = SHMEM_I(inode); 1810 struct shmem_sb_info *sbinfo; 1811 struct mm_struct *charge_mm; 1812 struct page *page; 1813 enum sgp_type sgp_huge = sgp; 1814 pgoff_t hindex = index; 1815 gfp_t huge_gfp; 1816 int error; 1817 int once = 0; 1818 int alloced = 0; 1819 1820 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1821 return -EFBIG; 1822 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1823 sgp = SGP_CACHE; 1824 repeat: 1825 if (sgp <= SGP_CACHE && 1826 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1827 return -EINVAL; 1828 } 1829 1830 sbinfo = SHMEM_SB(inode->i_sb); 1831 charge_mm = vma ? vma->vm_mm : NULL; 1832 1833 page = pagecache_get_page(mapping, index, 1834 FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0); 1835 1836 if (page && vma && userfaultfd_minor(vma)) { 1837 if (!xa_is_value(page)) { 1838 unlock_page(page); 1839 put_page(page); 1840 } 1841 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 1842 return 0; 1843 } 1844 1845 if (xa_is_value(page)) { 1846 error = shmem_swapin_page(inode, index, &page, 1847 sgp, gfp, vma, fault_type); 1848 if (error == -EEXIST) 1849 goto repeat; 1850 1851 *pagep = page; 1852 return error; 1853 } 1854 1855 if (page) 1856 hindex = page->index; 1857 if (page && sgp == SGP_WRITE) 1858 mark_page_accessed(page); 1859 1860 /* fallocated page? */ 1861 if (page && !PageUptodate(page)) { 1862 if (sgp != SGP_READ) 1863 goto clear; 1864 unlock_page(page); 1865 put_page(page); 1866 page = NULL; 1867 hindex = index; 1868 } 1869 if (page || sgp == SGP_READ) 1870 goto out; 1871 1872 /* 1873 * Fast cache lookup did not find it: 1874 * bring it back from swap or allocate. 
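	 * (An illustrative huge=within_size case below, with 4kB base pages
	 * and 2MB huge pages: in a 3MB file, a fault at index 0 sees
	 * round_up(0, 512) = 0 <= 768 = i_size in pages, so the first 2MB
	 * gets a huge page; a fault at index 600 sees round_up(600, 512) =
	 * 1024 > 768, so the tail falls back to small pages unless
	 * madvise(MADV_HUGEPAGE) asked for more.)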
1875 */ 1876 1877 if (vma && userfaultfd_missing(vma)) { 1878 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1879 return 0; 1880 } 1881 1882 /* shmem_symlink() */ 1883 if (!shmem_mapping(mapping)) 1884 goto alloc_nohuge; 1885 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1886 goto alloc_nohuge; 1887 if (shmem_huge == SHMEM_HUGE_FORCE) 1888 goto alloc_huge; 1889 switch (sbinfo->huge) { 1890 case SHMEM_HUGE_NEVER: 1891 goto alloc_nohuge; 1892 case SHMEM_HUGE_WITHIN_SIZE: { 1893 loff_t i_size; 1894 pgoff_t off; 1895 1896 off = round_up(index, HPAGE_PMD_NR); 1897 i_size = round_up(i_size_read(inode), PAGE_SIZE); 1898 if (i_size >= HPAGE_PMD_SIZE && 1899 i_size >> PAGE_SHIFT >= off) 1900 goto alloc_huge; 1901 1902 fallthrough; 1903 } 1904 case SHMEM_HUGE_ADVISE: 1905 if (sgp_huge == SGP_HUGE) 1906 goto alloc_huge; 1907 /* TODO: implement fadvise() hints */ 1908 goto alloc_nohuge; 1909 } 1910 1911 alloc_huge: 1912 huge_gfp = vma_thp_gfp_mask(vma); 1913 huge_gfp = limit_gfp_mask(huge_gfp, gfp); 1914 page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true); 1915 if (IS_ERR(page)) { 1916 alloc_nohuge: 1917 page = shmem_alloc_and_acct_page(gfp, inode, 1918 index, false); 1919 } 1920 if (IS_ERR(page)) { 1921 int retry = 5; 1922 1923 error = PTR_ERR(page); 1924 page = NULL; 1925 if (error != -ENOSPC) 1926 goto unlock; 1927 /* 1928 * Try to reclaim some space by splitting a huge page 1929 * beyond i_size on the filesystem. 1930 */ 1931 while (retry--) { 1932 int ret; 1933 1934 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1935 if (ret == SHRINK_STOP) 1936 break; 1937 if (ret) 1938 goto alloc_nohuge; 1939 } 1940 goto unlock; 1941 } 1942 1943 if (PageTransHuge(page)) 1944 hindex = round_down(index, HPAGE_PMD_NR); 1945 else 1946 hindex = index; 1947 1948 if (sgp == SGP_WRITE) 1949 __SetPageReferenced(page); 1950 1951 error = shmem_add_to_page_cache(page, mapping, hindex, 1952 NULL, gfp & GFP_RECLAIM_MASK, 1953 charge_mm); 1954 if (error) 1955 goto unacct; 1956 lru_cache_add(page); 1957 1958 spin_lock_irq(&info->lock); 1959 info->alloced += compound_nr(page); 1960 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 1961 shmem_recalc_inode(inode); 1962 spin_unlock_irq(&info->lock); 1963 alloced = true; 1964 1965 if (PageTransHuge(page) && 1966 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1967 hindex + HPAGE_PMD_NR - 1) { 1968 /* 1969 * Part of the huge page is beyond i_size: subject 1970 * to shrink under memory pressure. 1971 */ 1972 spin_lock(&sbinfo->shrinklist_lock); 1973 /* 1974 * _careful to defend against unlocked access to 1975 * ->shrink_list in shmem_unused_huge_shrink() 1976 */ 1977 if (list_empty_careful(&info->shrinklist)) { 1978 list_add_tail(&info->shrinklist, 1979 &sbinfo->shrinklist); 1980 sbinfo->shrinklist_len++; 1981 } 1982 spin_unlock(&sbinfo->shrinklist_lock); 1983 } 1984 1985 /* 1986 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 1987 */ 1988 if (sgp == SGP_FALLOC) 1989 sgp = SGP_WRITE; 1990 clear: 1991 /* 1992 * Let SGP_WRITE caller clear ends if write does not fill page; 1993 * but SGP_FALLOC on a page fallocated earlier must initialize 1994 * it now, lest undo on failure cancel our earlier guarantee. 
1995 */ 1996 if (sgp != SGP_WRITE && !PageUptodate(page)) { 1997 int i; 1998 1999 for (i = 0; i < compound_nr(page); i++) { 2000 clear_highpage(page + i); 2001 flush_dcache_page(page + i); 2002 } 2003 SetPageUptodate(page); 2004 } 2005 2006 /* Perhaps the file has been truncated since we checked */ 2007 if (sgp <= SGP_CACHE && 2008 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2009 if (alloced) { 2010 ClearPageDirty(page); 2011 delete_from_page_cache(page); 2012 spin_lock_irq(&info->lock); 2013 shmem_recalc_inode(inode); 2014 spin_unlock_irq(&info->lock); 2015 } 2016 error = -EINVAL; 2017 goto unlock; 2018 } 2019 out: 2020 *pagep = page + index - hindex; 2021 return 0; 2022 2023 /* 2024 * Error recovery. 2025 */ 2026 unacct: 2027 shmem_inode_unacct_blocks(inode, compound_nr(page)); 2028 2029 if (PageTransHuge(page)) { 2030 unlock_page(page); 2031 put_page(page); 2032 goto alloc_nohuge; 2033 } 2034 unlock: 2035 if (page) { 2036 unlock_page(page); 2037 put_page(page); 2038 } 2039 if (error == -ENOSPC && !once++) { 2040 spin_lock_irq(&info->lock); 2041 shmem_recalc_inode(inode); 2042 spin_unlock_irq(&info->lock); 2043 goto repeat; 2044 } 2045 if (error == -EEXIST) 2046 goto repeat; 2047 return error; 2048 } 2049 2050 /* 2051 * This is like autoremove_wake_function, but it removes the wait queue 2052 * entry unconditionally - even if something else had already woken the 2053 * target. 2054 */ 2055 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 2056 { 2057 int ret = default_wake_function(wait, mode, sync, key); 2058 list_del_init(&wait->entry); 2059 return ret; 2060 } 2061 2062 static vm_fault_t shmem_fault(struct vm_fault *vmf) 2063 { 2064 struct vm_area_struct *vma = vmf->vma; 2065 struct inode *inode = file_inode(vma->vm_file); 2066 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2067 enum sgp_type sgp; 2068 int err; 2069 vm_fault_t ret = VM_FAULT_LOCKED; 2070 2071 /* 2072 * Trinity finds that probing a hole which tmpfs is punching can 2073 * prevent the hole-punch from ever completing: which in turn 2074 * locks writers out with its hold on i_mutex. So refrain from 2075 * faulting pages into the hole while it's being punched. Although 2076 * shmem_undo_range() does remove the additions, it may be unable to 2077 * keep up, as each new page needs its own unmap_mapping_range() call, 2078 * and the i_mmap tree grows ever slower to scan if new vmas are added. 2079 * 2080 * It does not matter if we sometimes reach this check just before the 2081 * hole-punch begins, so that one fault then races with the punch: 2082 * we just need to make racing faults a rare case. 2083 * 2084 * The implementation below would be much simpler if we just used a 2085 * standard mutex or completion: but we cannot take i_mutex in fault, 2086 * and bloating every shmem inode for this unlikely case would be sad. 
2087 */ 2088 if (unlikely(inode->i_private)) { 2089 struct shmem_falloc *shmem_falloc; 2090 2091 spin_lock(&inode->i_lock); 2092 shmem_falloc = inode->i_private; 2093 if (shmem_falloc && 2094 shmem_falloc->waitq && 2095 vmf->pgoff >= shmem_falloc->start && 2096 vmf->pgoff < shmem_falloc->next) { 2097 struct file *fpin; 2098 wait_queue_head_t *shmem_falloc_waitq; 2099 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 2100 2101 ret = VM_FAULT_NOPAGE; 2102 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 2103 if (fpin) 2104 ret = VM_FAULT_RETRY; 2105 2106 shmem_falloc_waitq = shmem_falloc->waitq; 2107 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 2108 TASK_UNINTERRUPTIBLE); 2109 spin_unlock(&inode->i_lock); 2110 schedule(); 2111 2112 /* 2113 * shmem_falloc_waitq points into the shmem_fallocate() 2114 * stack of the hole-punching task: shmem_falloc_waitq 2115 * is usually invalid by the time we reach here, but 2116 * finish_wait() does not dereference it in that case; 2117 * though i_lock needed lest racing with wake_up_all(). 2118 */ 2119 spin_lock(&inode->i_lock); 2120 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 2121 spin_unlock(&inode->i_lock); 2122 2123 if (fpin) 2124 fput(fpin); 2125 return ret; 2126 } 2127 spin_unlock(&inode->i_lock); 2128 } 2129 2130 sgp = SGP_CACHE; 2131 2132 if ((vma->vm_flags & VM_NOHUGEPAGE) || 2133 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 2134 sgp = SGP_NOHUGE; 2135 else if (vma->vm_flags & VM_HUGEPAGE) 2136 sgp = SGP_HUGE; 2137 2138 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 2139 gfp, vma, vmf, &ret); 2140 if (err) 2141 return vmf_error(err); 2142 return ret; 2143 } 2144 2145 unsigned long shmem_get_unmapped_area(struct file *file, 2146 unsigned long uaddr, unsigned long len, 2147 unsigned long pgoff, unsigned long flags) 2148 { 2149 unsigned long (*get_area)(struct file *, 2150 unsigned long, unsigned long, unsigned long, unsigned long); 2151 unsigned long addr; 2152 unsigned long offset; 2153 unsigned long inflated_len; 2154 unsigned long inflated_addr; 2155 unsigned long inflated_offset; 2156 2157 if (len > TASK_SIZE) 2158 return -ENOMEM; 2159 2160 get_area = current->mm->get_unmapped_area; 2161 addr = get_area(file, uaddr, len, pgoff, flags); 2162 2163 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2164 return addr; 2165 if (IS_ERR_VALUE(addr)) 2166 return addr; 2167 if (addr & ~PAGE_MASK) 2168 return addr; 2169 if (addr > TASK_SIZE - len) 2170 return addr; 2171 2172 if (shmem_huge == SHMEM_HUGE_DENY) 2173 return addr; 2174 if (len < HPAGE_PMD_SIZE) 2175 return addr; 2176 if (flags & MAP_FIXED) 2177 return addr; 2178 /* 2179 * Our priority is to support MAP_SHARED mapped hugely; 2180 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2181 * But if caller specified an address hint and we allocated area there 2182 * successfully, respect that as before. 2183 */ 2184 if (uaddr == addr) 2185 return addr; 2186 2187 if (shmem_huge != SHMEM_HUGE_FORCE) { 2188 struct super_block *sb; 2189 2190 if (file) { 2191 VM_BUG_ON(file->f_op != &shmem_file_operations); 2192 sb = file_inode(file)->i_sb; 2193 } else { 2194 /* 2195 * Called directly from mm/mmap.c, or drivers/char/mem.c 2196 * for "/dev/zero", to create a shared anonymous object. 
2197 */ 2198 if (IS_ERR(shm_mnt)) 2199 return addr; 2200 sb = shm_mnt->mnt_sb; 2201 } 2202 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2203 return addr; 2204 } 2205 2206 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2207 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2208 return addr; 2209 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2210 return addr; 2211 2212 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2213 if (inflated_len > TASK_SIZE) 2214 return addr; 2215 if (inflated_len < len) 2216 return addr; 2217 2218 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2219 if (IS_ERR_VALUE(inflated_addr)) 2220 return addr; 2221 if (inflated_addr & ~PAGE_MASK) 2222 return addr; 2223 2224 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2225 inflated_addr += offset - inflated_offset; 2226 if (inflated_offset > offset) 2227 inflated_addr += HPAGE_PMD_SIZE; 2228 2229 if (inflated_addr > TASK_SIZE - len) 2230 return addr; 2231 return inflated_addr; 2232 } 2233 2234 #ifdef CONFIG_NUMA 2235 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2236 { 2237 struct inode *inode = file_inode(vma->vm_file); 2238 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2239 } 2240 2241 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2242 unsigned long addr) 2243 { 2244 struct inode *inode = file_inode(vma->vm_file); 2245 pgoff_t index; 2246 2247 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2248 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2249 } 2250 #endif 2251 2252 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 2253 { 2254 struct inode *inode = file_inode(file); 2255 struct shmem_inode_info *info = SHMEM_I(inode); 2256 int retval = -ENOMEM; 2257 2258 /* 2259 * What serializes the accesses to info->flags? 2260 * ipc_lock_object() when called from shmctl_do_lock(), 2261 * no serialization needed when called from shm_destroy(). 
2262 */ 2263 if (lock && !(info->flags & VM_LOCKED)) { 2264 if (!user_shm_lock(inode->i_size, ucounts)) 2265 goto out_nomem; 2266 info->flags |= VM_LOCKED; 2267 mapping_set_unevictable(file->f_mapping); 2268 } 2269 if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2270 user_shm_unlock(inode->i_size, ucounts); 2271 info->flags &= ~VM_LOCKED; 2272 mapping_clear_unevictable(file->f_mapping); 2273 } 2274 retval = 0; 2275 2276 out_nomem: 2277 return retval; 2278 } 2279 2280 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2281 { 2282 struct shmem_inode_info *info = SHMEM_I(file_inode(file)); 2283 int ret; 2284 2285 ret = seal_check_future_write(info->seals, vma); 2286 if (ret) 2287 return ret; 2288 2289 /* arm64 - allow memory tagging on RAM-based files */ 2290 vma->vm_flags |= VM_MTE_ALLOWED; 2291 2292 file_accessed(file); 2293 vma->vm_ops = &shmem_vm_ops; 2294 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 2295 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2296 (vma->vm_end & HPAGE_PMD_MASK)) { 2297 khugepaged_enter(vma, vma->vm_flags); 2298 } 2299 return 0; 2300 } 2301 2302 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 2303 umode_t mode, dev_t dev, unsigned long flags) 2304 { 2305 struct inode *inode; 2306 struct shmem_inode_info *info; 2307 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2308 ino_t ino; 2309 2310 if (shmem_reserve_inode(sb, &ino)) 2311 return NULL; 2312 2313 inode = new_inode(sb); 2314 if (inode) { 2315 inode->i_ino = ino; 2316 inode_init_owner(&init_user_ns, inode, dir, mode); 2317 inode->i_blocks = 0; 2318 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2319 inode->i_generation = prandom_u32(); 2320 info = SHMEM_I(inode); 2321 memset(info, 0, (char *)inode - (char *)info); 2322 spin_lock_init(&info->lock); 2323 atomic_set(&info->stop_eviction, 0); 2324 info->seals = F_SEAL_SEAL; 2325 info->flags = flags & VM_NORESERVE; 2326 INIT_LIST_HEAD(&info->shrinklist); 2327 INIT_LIST_HEAD(&info->swaplist); 2328 simple_xattrs_init(&info->xattrs); 2329 cache_no_acl(inode); 2330 2331 switch (mode & S_IFMT) { 2332 default: 2333 inode->i_op = &shmem_special_inode_operations; 2334 init_special_inode(inode, mode, dev); 2335 break; 2336 case S_IFREG: 2337 inode->i_mapping->a_ops = &shmem_aops; 2338 inode->i_op = &shmem_inode_operations; 2339 inode->i_fop = &shmem_file_operations; 2340 mpol_shared_policy_init(&info->policy, 2341 shmem_get_sbmpol(sbinfo)); 2342 break; 2343 case S_IFDIR: 2344 inc_nlink(inode); 2345 /* Some things misbehave if size == 0 on a directory */ 2346 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2347 inode->i_op = &shmem_dir_inode_operations; 2348 inode->i_fop = &simple_dir_operations; 2349 break; 2350 case S_IFLNK: 2351 /* 2352 * Must not load anything in the rbtree, 2353 * mpol_free_shared_policy will not be called. 
2354 */ 2355 mpol_shared_policy_init(&info->policy, NULL); 2356 break; 2357 } 2358 2359 lockdep_annotate_inode_mutex_key(inode); 2360 } else 2361 shmem_free_inode(sb); 2362 return inode; 2363 } 2364 2365 #ifdef CONFIG_USERFAULTFD 2366 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 2367 pmd_t *dst_pmd, 2368 struct vm_area_struct *dst_vma, 2369 unsigned long dst_addr, 2370 unsigned long src_addr, 2371 bool zeropage, 2372 struct page **pagep) 2373 { 2374 struct inode *inode = file_inode(dst_vma->vm_file); 2375 struct shmem_inode_info *info = SHMEM_I(inode); 2376 struct address_space *mapping = inode->i_mapping; 2377 gfp_t gfp = mapping_gfp_mask(mapping); 2378 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 2379 void *page_kaddr; 2380 struct page *page; 2381 int ret; 2382 pgoff_t max_off; 2383 2384 if (!shmem_inode_acct_block(inode, 1)) { 2385 /* 2386 * We may have got a page, returned -ENOENT triggering a retry, 2387 * and now we find ourselves with -ENOMEM. Release the page, to 2388 * avoid a BUG_ON in our caller. 2389 */ 2390 if (unlikely(*pagep)) { 2391 put_page(*pagep); 2392 *pagep = NULL; 2393 } 2394 return -ENOMEM; 2395 } 2396 2397 if (!*pagep) { 2398 ret = -ENOMEM; 2399 page = shmem_alloc_page(gfp, info, pgoff); 2400 if (!page) 2401 goto out_unacct_blocks; 2402 2403 if (!zeropage) { /* COPY */ 2404 page_kaddr = kmap_atomic(page); 2405 ret = copy_from_user(page_kaddr, 2406 (const void __user *)src_addr, 2407 PAGE_SIZE); 2408 kunmap_atomic(page_kaddr); 2409 2410 /* fallback to copy_from_user outside mmap_lock */ 2411 if (unlikely(ret)) { 2412 *pagep = page; 2413 ret = -ENOENT; 2414 /* don't free the page */ 2415 goto out_unacct_blocks; 2416 } 2417 } else { /* ZEROPAGE */ 2418 clear_highpage(page); 2419 } 2420 } else { 2421 page = *pagep; 2422 *pagep = NULL; 2423 } 2424 2425 VM_BUG_ON(PageLocked(page)); 2426 VM_BUG_ON(PageSwapBacked(page)); 2427 __SetPageLocked(page); 2428 __SetPageSwapBacked(page); 2429 __SetPageUptodate(page); 2430 2431 ret = -EFAULT; 2432 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2433 if (unlikely(pgoff >= max_off)) 2434 goto out_release; 2435 2436 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, 2437 gfp & GFP_RECLAIM_MASK, dst_mm); 2438 if (ret) 2439 goto out_release; 2440 2441 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, 2442 page, true, false); 2443 if (ret) 2444 goto out_delete_from_cache; 2445 2446 spin_lock_irq(&info->lock); 2447 info->alloced++; 2448 inode->i_blocks += BLOCKS_PER_PAGE; 2449 shmem_recalc_inode(inode); 2450 spin_unlock_irq(&info->lock); 2451 2452 SetPageDirty(page); 2453 unlock_page(page); 2454 return 0; 2455 out_delete_from_cache: 2456 delete_from_page_cache(page); 2457 out_release: 2458 unlock_page(page); 2459 put_page(page); 2460 out_unacct_blocks: 2461 shmem_inode_unacct_blocks(inode, 1); 2462 return ret; 2463 } 2464 #endif /* CONFIG_USERFAULTFD */ 2465 2466 #ifdef CONFIG_TMPFS 2467 static const struct inode_operations shmem_symlink_inode_operations; 2468 static const struct inode_operations shmem_short_symlink_operations; 2469 2470 #ifdef CONFIG_TMPFS_XATTR 2471 static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2472 #else 2473 #define shmem_initxattrs NULL 2474 #endif 2475 2476 static int 2477 shmem_write_begin(struct file *file, struct address_space *mapping, 2478 loff_t pos, unsigned len, unsigned flags, 2479 struct page **pagep, void **fsdata) 2480 { 2481 struct inode *inode = mapping->host; 2482 struct shmem_inode_info *info = SHMEM_I(inode); 2483 pgoff_t 
index = pos >> PAGE_SHIFT; 2484 2485 /* i_mutex is held by caller */ 2486 if (unlikely(info->seals & (F_SEAL_GROW | 2487 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2488 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 2489 return -EPERM; 2490 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 2491 return -EPERM; 2492 } 2493 2494 return shmem_getpage(inode, index, pagep, SGP_WRITE); 2495 } 2496 2497 static int 2498 shmem_write_end(struct file *file, struct address_space *mapping, 2499 loff_t pos, unsigned len, unsigned copied, 2500 struct page *page, void *fsdata) 2501 { 2502 struct inode *inode = mapping->host; 2503 2504 if (pos + copied > inode->i_size) 2505 i_size_write(inode, pos + copied); 2506 2507 if (!PageUptodate(page)) { 2508 struct page *head = compound_head(page); 2509 if (PageTransCompound(page)) { 2510 int i; 2511 2512 for (i = 0; i < HPAGE_PMD_NR; i++) { 2513 if (head + i == page) 2514 continue; 2515 clear_highpage(head + i); 2516 flush_dcache_page(head + i); 2517 } 2518 } 2519 if (copied < PAGE_SIZE) { 2520 unsigned from = pos & (PAGE_SIZE - 1); 2521 zero_user_segments(page, 0, from, 2522 from + copied, PAGE_SIZE); 2523 } 2524 SetPageUptodate(head); 2525 } 2526 set_page_dirty(page); 2527 unlock_page(page); 2528 put_page(page); 2529 2530 return copied; 2531 } 2532 2533 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 2534 { 2535 struct file *file = iocb->ki_filp; 2536 struct inode *inode = file_inode(file); 2537 struct address_space *mapping = inode->i_mapping; 2538 pgoff_t index; 2539 unsigned long offset; 2540 enum sgp_type sgp = SGP_READ; 2541 int error = 0; 2542 ssize_t retval = 0; 2543 loff_t *ppos = &iocb->ki_pos; 2544 2545 /* 2546 * Might this read be for a stacking filesystem? Then when reading 2547 * holes of a sparse file, we actually need to allocate those pages, 2548 * and even mark them dirty, so it cannot exceed the max_blocks limit. 2549 */ 2550 if (!iter_is_iovec(to)) 2551 sgp = SGP_CACHE; 2552 2553 index = *ppos >> PAGE_SHIFT; 2554 offset = *ppos & ~PAGE_MASK; 2555 2556 for (;;) { 2557 struct page *page = NULL; 2558 pgoff_t end_index; 2559 unsigned long nr, ret; 2560 loff_t i_size = i_size_read(inode); 2561 2562 end_index = i_size >> PAGE_SHIFT; 2563 if (index > end_index) 2564 break; 2565 if (index == end_index) { 2566 nr = i_size & ~PAGE_MASK; 2567 if (nr <= offset) 2568 break; 2569 } 2570 2571 error = shmem_getpage(inode, index, &page, sgp); 2572 if (error) { 2573 if (error == -EINVAL) 2574 error = 0; 2575 break; 2576 } 2577 if (page) { 2578 if (sgp == SGP_CACHE) 2579 set_page_dirty(page); 2580 unlock_page(page); 2581 } 2582 2583 /* 2584 * We must evaluate after, since reads (unlike writes) 2585 * are called without i_mutex protection against truncate 2586 */ 2587 nr = PAGE_SIZE; 2588 i_size = i_size_read(inode); 2589 end_index = i_size >> PAGE_SHIFT; 2590 if (index == end_index) { 2591 nr = i_size & ~PAGE_MASK; 2592 if (nr <= offset) { 2593 if (page) 2594 put_page(page); 2595 break; 2596 } 2597 } 2598 nr -= offset; 2599 2600 if (page) { 2601 /* 2602 * If users can be writing to this page using arbitrary 2603 * virtual addresses, take care about potential aliasing 2604 * before reading the page on the kernel side. 2605 */ 2606 if (mapping_writably_mapped(mapping)) 2607 flush_dcache_page(page); 2608 /* 2609 * Mark the page accessed if we read the beginning. 
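 *
 * (Only when the copy starts at offset 0 of the page, i.e. once per
 *  page for a sequential read rather than once per partial copy.)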
2610 */ 2611 if (!offset) 2612 mark_page_accessed(page); 2613 } else { 2614 page = ZERO_PAGE(0); 2615 get_page(page); 2616 } 2617 2618 /* 2619 * Ok, we have the page, and it's up-to-date, so 2620 * now we can copy it to user space... 2621 */ 2622 ret = copy_page_to_iter(page, offset, nr, to); 2623 retval += ret; 2624 offset += ret; 2625 index += offset >> PAGE_SHIFT; 2626 offset &= ~PAGE_MASK; 2627 2628 put_page(page); 2629 if (!iov_iter_count(to)) 2630 break; 2631 if (ret < nr) { 2632 error = -EFAULT; 2633 break; 2634 } 2635 cond_resched(); 2636 } 2637 2638 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 2639 file_accessed(file); 2640 return retval ? retval : error; 2641 } 2642 2643 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2644 { 2645 struct address_space *mapping = file->f_mapping; 2646 struct inode *inode = mapping->host; 2647 2648 if (whence != SEEK_DATA && whence != SEEK_HOLE) 2649 return generic_file_llseek_size(file, offset, whence, 2650 MAX_LFS_FILESIZE, i_size_read(inode)); 2651 if (offset < 0) 2652 return -ENXIO; 2653 2654 inode_lock(inode); 2655 /* We're holding i_mutex so we can access i_size directly */ 2656 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 2657 if (offset >= 0) 2658 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 2659 inode_unlock(inode); 2660 return offset; 2661 } 2662 2663 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 2664 loff_t len) 2665 { 2666 struct inode *inode = file_inode(file); 2667 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 2668 struct shmem_inode_info *info = SHMEM_I(inode); 2669 struct shmem_falloc shmem_falloc; 2670 pgoff_t start, index, end; 2671 int error; 2672 2673 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2674 return -EOPNOTSUPP; 2675 2676 inode_lock(inode); 2677 2678 if (mode & FALLOC_FL_PUNCH_HOLE) { 2679 struct address_space *mapping = file->f_mapping; 2680 loff_t unmap_start = round_up(offset, PAGE_SIZE); 2681 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 2682 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 2683 2684 /* protected by i_mutex */ 2685 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 2686 error = -EPERM; 2687 goto out; 2688 } 2689 2690 shmem_falloc.waitq = &shmem_falloc_waitq; 2691 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 2692 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2693 spin_lock(&inode->i_lock); 2694 inode->i_private = &shmem_falloc; 2695 spin_unlock(&inode->i_lock); 2696 2697 if ((u64)unmap_end > (u64)unmap_start) 2698 unmap_mapping_range(mapping, unmap_start, 2699 1 + unmap_end - unmap_start, 0); 2700 shmem_truncate_range(inode, offset, offset + len - 1); 2701 /* No need to unmap again: hole-punching leaves COWed pages */ 2702 2703 spin_lock(&inode->i_lock); 2704 inode->i_private = NULL; 2705 wake_up_all(&shmem_falloc_waitq); 2706 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 2707 spin_unlock(&inode->i_lock); 2708 error = 0; 2709 goto out; 2710 } 2711 2712 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2713 error = inode_newsize_ok(inode, offset + len); 2714 if (error) 2715 goto out; 2716 2717 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 2718 error = -EPERM; 2719 goto out; 2720 } 2721 2722 start = offset >> PAGE_SHIFT; 2723 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2724 /* Try to avoid a swapstorm if len is impossible to satisfy */ 2725 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2726 
error = -ENOSPC; 2727 goto out; 2728 } 2729 2730 shmem_falloc.waitq = NULL; 2731 shmem_falloc.start = start; 2732 shmem_falloc.next = start; 2733 shmem_falloc.nr_falloced = 0; 2734 shmem_falloc.nr_unswapped = 0; 2735 spin_lock(&inode->i_lock); 2736 inode->i_private = &shmem_falloc; 2737 spin_unlock(&inode->i_lock); 2738 2739 for (index = start; index < end; index++) { 2740 struct page *page; 2741 2742 /* 2743 * Good, the fallocate(2) manpage permits EINTR: we may have 2744 * been interrupted because we are using up too much memory. 2745 */ 2746 if (signal_pending(current)) 2747 error = -EINTR; 2748 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 2749 error = -ENOMEM; 2750 else 2751 error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2752 if (error) { 2753 /* Remove the !PageUptodate pages we added */ 2754 if (index > start) { 2755 shmem_undo_range(inode, 2756 (loff_t)start << PAGE_SHIFT, 2757 ((loff_t)index << PAGE_SHIFT) - 1, true); 2758 } 2759 goto undone; 2760 } 2761 2762 /* 2763 * Inform shmem_writepage() how far we have reached. 2764 * No need for lock or barrier: we have the page lock. 2765 */ 2766 shmem_falloc.next++; 2767 if (!PageUptodate(page)) 2768 shmem_falloc.nr_falloced++; 2769 2770 /* 2771 * If !PageUptodate, leave it that way so that freeable pages 2772 * can be recognized if we need to rollback on error later. 2773 * But set_page_dirty so that memory pressure will swap rather 2774 * than free the pages we are allocating (and SGP_CACHE pages 2775 * might still be clean: we now need to mark those dirty too). 2776 */ 2777 set_page_dirty(page); 2778 unlock_page(page); 2779 put_page(page); 2780 cond_resched(); 2781 } 2782 2783 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2784 i_size_write(inode, offset + len); 2785 inode->i_ctime = current_time(inode); 2786 undone: 2787 spin_lock(&inode->i_lock); 2788 inode->i_private = NULL; 2789 spin_unlock(&inode->i_lock); 2790 out: 2791 inode_unlock(inode); 2792 return error; 2793 } 2794 2795 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 2796 { 2797 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 2798 2799 buf->f_type = TMPFS_MAGIC; 2800 buf->f_bsize = PAGE_SIZE; 2801 buf->f_namelen = NAME_MAX; 2802 if (sbinfo->max_blocks) { 2803 buf->f_blocks = sbinfo->max_blocks; 2804 buf->f_bavail = 2805 buf->f_bfree = sbinfo->max_blocks - 2806 percpu_counter_sum(&sbinfo->used_blocks); 2807 } 2808 if (sbinfo->max_inodes) { 2809 buf->f_files = sbinfo->max_inodes; 2810 buf->f_ffree = sbinfo->free_inodes; 2811 } 2812 /* else leave those fields 0 like simple_statfs */ 2813 2814 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 2815 2816 return 0; 2817 } 2818 2819 /* 2820 * File creation. Allocate an inode, and we're done.. 
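 *
 * (For instance, creat() or open() with O_CREAT on a tmpfs mount ends
 *  up here via shmem_create(), and mkdir() via shmem_mkdir(); these
 *  callers are illustrative, not an exhaustive list.)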
2821 */ 2822 static int 2823 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir, 2824 struct dentry *dentry, umode_t mode, dev_t dev) 2825 { 2826 struct inode *inode; 2827 int error = -ENOSPC; 2828 2829 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 2830 if (inode) { 2831 error = simple_acl_create(dir, inode); 2832 if (error) 2833 goto out_iput; 2834 error = security_inode_init_security(inode, dir, 2835 &dentry->d_name, 2836 shmem_initxattrs, NULL); 2837 if (error && error != -EOPNOTSUPP) 2838 goto out_iput; 2839 2840 error = 0; 2841 dir->i_size += BOGO_DIRENT_SIZE; 2842 dir->i_ctime = dir->i_mtime = current_time(dir); 2843 d_instantiate(dentry, inode); 2844 dget(dentry); /* Extra count - pin the dentry in core */ 2845 } 2846 return error; 2847 out_iput: 2848 iput(inode); 2849 return error; 2850 } 2851 2852 static int 2853 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 2854 struct dentry *dentry, umode_t mode) 2855 { 2856 struct inode *inode; 2857 int error = -ENOSPC; 2858 2859 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 2860 if (inode) { 2861 error = security_inode_init_security(inode, dir, 2862 NULL, 2863 shmem_initxattrs, NULL); 2864 if (error && error != -EOPNOTSUPP) 2865 goto out_iput; 2866 error = simple_acl_create(dir, inode); 2867 if (error) 2868 goto out_iput; 2869 d_tmpfile(dentry, inode); 2870 } 2871 return error; 2872 out_iput: 2873 iput(inode); 2874 return error; 2875 } 2876 2877 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 2878 struct dentry *dentry, umode_t mode) 2879 { 2880 int error; 2881 2882 if ((error = shmem_mknod(&init_user_ns, dir, dentry, 2883 mode | S_IFDIR, 0))) 2884 return error; 2885 inc_nlink(dir); 2886 return 0; 2887 } 2888 2889 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir, 2890 struct dentry *dentry, umode_t mode, bool excl) 2891 { 2892 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0); 2893 } 2894 2895 /* 2896 * Link a file.. 2897 */ 2898 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 2899 { 2900 struct inode *inode = d_inode(old_dentry); 2901 int ret = 0; 2902 2903 /* 2904 * No ordinary (disk based) filesystem counts links as inodes; 2905 * but each new link needs a new dentry, pinning lowmem, and 2906 * tmpfs dentries cannot be pruned until they are unlinked. 2907 * But if an O_TMPFILE file is linked into the tmpfs, the 2908 * first link must skip that, to get the accounting right. 
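 *
 * A hedged sketch of that O_TMPFILE case (all path names illustrative):
 *
 *	fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *	... populate the file ...
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/now-visible",
 *	       AT_SYMLINK_FOLLOW);
 *
 * The linkat() arrives below with i_nlink still 0, so the first link
 * skips the extra inode reservation.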
2909 */ 2910 if (inode->i_nlink) { 2911 ret = shmem_reserve_inode(inode->i_sb, NULL); 2912 if (ret) 2913 goto out; 2914 } 2915 2916 dir->i_size += BOGO_DIRENT_SIZE; 2917 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2918 inc_nlink(inode); 2919 ihold(inode); /* New dentry reference */ 2920 dget(dentry); /* Extra pinning count for the created dentry */ 2921 d_instantiate(dentry, inode); 2922 out: 2923 return ret; 2924 } 2925 2926 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 2927 { 2928 struct inode *inode = d_inode(dentry); 2929 2930 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 2931 shmem_free_inode(inode->i_sb); 2932 2933 dir->i_size -= BOGO_DIRENT_SIZE; 2934 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2935 drop_nlink(inode); 2936 dput(dentry); /* Undo the count from "create" - this does all the work */ 2937 return 0; 2938 } 2939 2940 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 2941 { 2942 if (!simple_empty(dentry)) 2943 return -ENOTEMPTY; 2944 2945 drop_nlink(d_inode(dentry)); 2946 drop_nlink(dir); 2947 return shmem_unlink(dir, dentry); 2948 } 2949 2950 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2951 { 2952 bool old_is_dir = d_is_dir(old_dentry); 2953 bool new_is_dir = d_is_dir(new_dentry); 2954 2955 if (old_dir != new_dir && old_is_dir != new_is_dir) { 2956 if (old_is_dir) { 2957 drop_nlink(old_dir); 2958 inc_nlink(new_dir); 2959 } else { 2960 drop_nlink(new_dir); 2961 inc_nlink(old_dir); 2962 } 2963 } 2964 old_dir->i_ctime = old_dir->i_mtime = 2965 new_dir->i_ctime = new_dir->i_mtime = 2966 d_inode(old_dentry)->i_ctime = 2967 d_inode(new_dentry)->i_ctime = current_time(old_dir); 2968 2969 return 0; 2970 } 2971 2972 static int shmem_whiteout(struct user_namespace *mnt_userns, 2973 struct inode *old_dir, struct dentry *old_dentry) 2974 { 2975 struct dentry *whiteout; 2976 int error; 2977 2978 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 2979 if (!whiteout) 2980 return -ENOMEM; 2981 2982 error = shmem_mknod(&init_user_ns, old_dir, whiteout, 2983 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 2984 dput(whiteout); 2985 if (error) 2986 return error; 2987 2988 /* 2989 * Cheat and hash the whiteout while the old dentry is still in 2990 * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 2991 * 2992 * d_lookup() will consistently find one of them at this point, 2993 * not sure which one, but that isn't even important. 2994 */ 2995 d_rehash(whiteout); 2996 return 0; 2997 } 2998 2999 /* 3000 * The VFS layer already does all the dentry stuff for rename, 3001 * we just have to decrement the usage count for the target if 3002 * it exists so that the VFS layer correctly free's it when it 3003 * gets overwritten. 
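 *
 * Hedged examples of the flags handled below (names illustrative):
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *		atomically swaps "a" and "b"
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_WHITEOUT);
 *		moves "a" and leaves a whiteout at the old name
 *
 * A plain rename() takes the flags == 0 path.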
3004 */ 3005 static int shmem_rename2(struct user_namespace *mnt_userns, 3006 struct inode *old_dir, struct dentry *old_dentry, 3007 struct inode *new_dir, struct dentry *new_dentry, 3008 unsigned int flags) 3009 { 3010 struct inode *inode = d_inode(old_dentry); 3011 int they_are_dirs = S_ISDIR(inode->i_mode); 3012 3013 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 3014 return -EINVAL; 3015 3016 if (flags & RENAME_EXCHANGE) 3017 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 3018 3019 if (!simple_empty(new_dentry)) 3020 return -ENOTEMPTY; 3021 3022 if (flags & RENAME_WHITEOUT) { 3023 int error; 3024 3025 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry); 3026 if (error) 3027 return error; 3028 } 3029 3030 if (d_really_is_positive(new_dentry)) { 3031 (void) shmem_unlink(new_dir, new_dentry); 3032 if (they_are_dirs) { 3033 drop_nlink(d_inode(new_dentry)); 3034 drop_nlink(old_dir); 3035 } 3036 } else if (they_are_dirs) { 3037 drop_nlink(old_dir); 3038 inc_nlink(new_dir); 3039 } 3040 3041 old_dir->i_size -= BOGO_DIRENT_SIZE; 3042 new_dir->i_size += BOGO_DIRENT_SIZE; 3043 old_dir->i_ctime = old_dir->i_mtime = 3044 new_dir->i_ctime = new_dir->i_mtime = 3045 inode->i_ctime = current_time(old_dir); 3046 return 0; 3047 } 3048 3049 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir, 3050 struct dentry *dentry, const char *symname) 3051 { 3052 int error; 3053 int len; 3054 struct inode *inode; 3055 struct page *page; 3056 3057 len = strlen(symname) + 1; 3058 if (len > PAGE_SIZE) 3059 return -ENAMETOOLONG; 3060 3061 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 3062 VM_NORESERVE); 3063 if (!inode) 3064 return -ENOSPC; 3065 3066 error = security_inode_init_security(inode, dir, &dentry->d_name, 3067 shmem_initxattrs, NULL); 3068 if (error && error != -EOPNOTSUPP) { 3069 iput(inode); 3070 return error; 3071 } 3072 3073 inode->i_size = len-1; 3074 if (len <= SHORT_SYMLINK_LEN) { 3075 inode->i_link = kmemdup(symname, len, GFP_KERNEL); 3076 if (!inode->i_link) { 3077 iput(inode); 3078 return -ENOMEM; 3079 } 3080 inode->i_op = &shmem_short_symlink_operations; 3081 } else { 3082 inode_nohighmem(inode); 3083 error = shmem_getpage(inode, 0, &page, SGP_WRITE); 3084 if (error) { 3085 iput(inode); 3086 return error; 3087 } 3088 inode->i_mapping->a_ops = &shmem_aops; 3089 inode->i_op = &shmem_symlink_inode_operations; 3090 memcpy(page_address(page), symname, len); 3091 SetPageUptodate(page); 3092 set_page_dirty(page); 3093 unlock_page(page); 3094 put_page(page); 3095 } 3096 dir->i_size += BOGO_DIRENT_SIZE; 3097 dir->i_ctime = dir->i_mtime = current_time(dir); 3098 d_instantiate(dentry, inode); 3099 dget(dentry); 3100 return 0; 3101 } 3102 3103 static void shmem_put_link(void *arg) 3104 { 3105 mark_page_accessed(arg); 3106 put_page(arg); 3107 } 3108 3109 static const char *shmem_get_link(struct dentry *dentry, 3110 struct inode *inode, 3111 struct delayed_call *done) 3112 { 3113 struct page *page = NULL; 3114 int error; 3115 if (!dentry) { 3116 page = find_get_page(inode->i_mapping, 0); 3117 if (!page) 3118 return ERR_PTR(-ECHILD); 3119 if (!PageUptodate(page)) { 3120 put_page(page); 3121 return ERR_PTR(-ECHILD); 3122 } 3123 } else { 3124 error = shmem_getpage(inode, 0, &page, SGP_READ); 3125 if (error) 3126 return ERR_PTR(error); 3127 unlock_page(page); 3128 } 3129 set_delayed_call(done, shmem_put_link, page); 3130 return page_address(page); 3131 } 3132 3133 #ifdef CONFIG_TMPFS_XATTR 3134 /* 3135 * Superblocks without xattr 
inode operations may get some security.* xattr 3136 * support from the LSM "for free". As soon as we have any other xattrs 3137 * like ACLs, we also need to implement the security.* handlers at 3138 * filesystem level, though. 3139 */ 3140 3141 /* 3142 * Callback for security_inode_init_security() for acquiring xattrs. 3143 */ 3144 static int shmem_initxattrs(struct inode *inode, 3145 const struct xattr *xattr_array, 3146 void *fs_info) 3147 { 3148 struct shmem_inode_info *info = SHMEM_I(inode); 3149 const struct xattr *xattr; 3150 struct simple_xattr *new_xattr; 3151 size_t len; 3152 3153 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3154 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 3155 if (!new_xattr) 3156 return -ENOMEM; 3157 3158 len = strlen(xattr->name) + 1; 3159 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3160 GFP_KERNEL); 3161 if (!new_xattr->name) { 3162 kvfree(new_xattr); 3163 return -ENOMEM; 3164 } 3165 3166 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 3167 XATTR_SECURITY_PREFIX_LEN); 3168 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 3169 xattr->name, len); 3170 3171 simple_xattr_list_add(&info->xattrs, new_xattr); 3172 } 3173 3174 return 0; 3175 } 3176 3177 static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3178 struct dentry *unused, struct inode *inode, 3179 const char *name, void *buffer, size_t size) 3180 { 3181 struct shmem_inode_info *info = SHMEM_I(inode); 3182 3183 name = xattr_full_name(handler, name); 3184 return simple_xattr_get(&info->xattrs, name, buffer, size); 3185 } 3186 3187 static int shmem_xattr_handler_set(const struct xattr_handler *handler, 3188 struct user_namespace *mnt_userns, 3189 struct dentry *unused, struct inode *inode, 3190 const char *name, const void *value, 3191 size_t size, int flags) 3192 { 3193 struct shmem_inode_info *info = SHMEM_I(inode); 3194 3195 name = xattr_full_name(handler, name); 3196 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); 3197 } 3198 3199 static const struct xattr_handler shmem_security_xattr_handler = { 3200 .prefix = XATTR_SECURITY_PREFIX, 3201 .get = shmem_xattr_handler_get, 3202 .set = shmem_xattr_handler_set, 3203 }; 3204 3205 static const struct xattr_handler shmem_trusted_xattr_handler = { 3206 .prefix = XATTR_TRUSTED_PREFIX, 3207 .get = shmem_xattr_handler_get, 3208 .set = shmem_xattr_handler_set, 3209 }; 3210 3211 static const struct xattr_handler *shmem_xattr_handlers[] = { 3212 #ifdef CONFIG_TMPFS_POSIX_ACL 3213 &posix_acl_access_xattr_handler, 3214 &posix_acl_default_xattr_handler, 3215 #endif 3216 &shmem_security_xattr_handler, 3217 &shmem_trusted_xattr_handler, 3218 NULL 3219 }; 3220 3221 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3222 { 3223 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3224 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3225 } 3226 #endif /* CONFIG_TMPFS_XATTR */ 3227 3228 static const struct inode_operations shmem_short_symlink_operations = { 3229 .get_link = simple_get_link, 3230 #ifdef CONFIG_TMPFS_XATTR 3231 .listxattr = shmem_listxattr, 3232 #endif 3233 }; 3234 3235 static const struct inode_operations shmem_symlink_inode_operations = { 3236 .get_link = shmem_get_link, 3237 #ifdef CONFIG_TMPFS_XATTR 3238 .listxattr = shmem_listxattr, 3239 #endif 3240 }; 3241 3242 static struct dentry *shmem_get_parent(struct dentry *child) 3243 { 3244 return ERR_PTR(-ESTALE); 3245 } 3246 3247 static int shmem_match(struct inode 
*ino, void *vfh) 3248 { 3249 __u32 *fh = vfh; 3250 __u64 inum = fh[2]; 3251 inum = (inum << 32) | fh[1]; 3252 return ino->i_ino == inum && fh[0] == ino->i_generation; 3253 } 3254 3255 /* Find any alias of inode, but prefer a hashed alias */ 3256 static struct dentry *shmem_find_alias(struct inode *inode) 3257 { 3258 struct dentry *alias = d_find_alias(inode); 3259 3260 return alias ?: d_find_any_alias(inode); 3261 } 3262 3263 3264 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3265 struct fid *fid, int fh_len, int fh_type) 3266 { 3267 struct inode *inode; 3268 struct dentry *dentry = NULL; 3269 u64 inum; 3270 3271 if (fh_len < 3) 3272 return NULL; 3273 3274 inum = fid->raw[2]; 3275 inum = (inum << 32) | fid->raw[1]; 3276 3277 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3278 shmem_match, fid->raw); 3279 if (inode) { 3280 dentry = shmem_find_alias(inode); 3281 iput(inode); 3282 } 3283 3284 return dentry; 3285 } 3286 3287 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3288 struct inode *parent) 3289 { 3290 if (*len < 3) { 3291 *len = 3; 3292 return FILEID_INVALID; 3293 } 3294 3295 if (inode_unhashed(inode)) { 3296 /* Unfortunately insert_inode_hash is not idempotent, 3297 * so as we hash inodes here rather than at creation 3298 * time, we need a lock to ensure we only try 3299 * to do it once 3300 */ 3301 static DEFINE_SPINLOCK(lock); 3302 spin_lock(&lock); 3303 if (inode_unhashed(inode)) 3304 __insert_inode_hash(inode, 3305 inode->i_ino + inode->i_generation); 3306 spin_unlock(&lock); 3307 } 3308 3309 fh[0] = inode->i_generation; 3310 fh[1] = inode->i_ino; 3311 fh[2] = ((__u64)inode->i_ino) >> 32; 3312 3313 *len = 3; 3314 return 1; 3315 } 3316 3317 static const struct export_operations shmem_export_ops = { 3318 .get_parent = shmem_get_parent, 3319 .encode_fh = shmem_encode_fh, 3320 .fh_to_dentry = shmem_fh_to_dentry, 3321 }; 3322 3323 enum shmem_param { 3324 Opt_gid, 3325 Opt_huge, 3326 Opt_mode, 3327 Opt_mpol, 3328 Opt_nr_blocks, 3329 Opt_nr_inodes, 3330 Opt_size, 3331 Opt_uid, 3332 Opt_inode32, 3333 Opt_inode64, 3334 }; 3335 3336 static const struct constant_table shmem_param_enums_huge[] = { 3337 {"never", SHMEM_HUGE_NEVER }, 3338 {"always", SHMEM_HUGE_ALWAYS }, 3339 {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 3340 {"advise", SHMEM_HUGE_ADVISE }, 3341 {} 3342 }; 3343 3344 const struct fs_parameter_spec shmem_fs_parameters[] = { 3345 fsparam_u32 ("gid", Opt_gid), 3346 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 3347 fsparam_u32oct("mode", Opt_mode), 3348 fsparam_string("mpol", Opt_mpol), 3349 fsparam_string("nr_blocks", Opt_nr_blocks), 3350 fsparam_string("nr_inodes", Opt_nr_inodes), 3351 fsparam_string("size", Opt_size), 3352 fsparam_u32 ("uid", Opt_uid), 3353 fsparam_flag ("inode32", Opt_inode32), 3354 fsparam_flag ("inode64", Opt_inode64), 3355 {} 3356 }; 3357 3358 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 3359 { 3360 struct shmem_options *ctx = fc->fs_private; 3361 struct fs_parse_result result; 3362 unsigned long long size; 3363 char *rest; 3364 int opt; 3365 3366 opt = fs_parse(fc, shmem_fs_parameters, param, &result); 3367 if (opt < 0) 3368 return opt; 3369 3370 switch (opt) { 3371 case Opt_size: 3372 size = memparse(param->string, &rest); 3373 if (*rest == '%') { 3374 size <<= PAGE_SHIFT; 3375 size *= totalram_pages(); 3376 do_div(size, 100); 3377 rest++; 3378 } 3379 if (*rest) 3380 goto bad_value; 3381 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3382 ctx->seen |= 
SHMEM_SEEN_BLOCKS; 3383 break; 3384 case Opt_nr_blocks: 3385 ctx->blocks = memparse(param->string, &rest); 3386 if (*rest) 3387 goto bad_value; 3388 ctx->seen |= SHMEM_SEEN_BLOCKS; 3389 break; 3390 case Opt_nr_inodes: 3391 ctx->inodes = memparse(param->string, &rest); 3392 if (*rest) 3393 goto bad_value; 3394 ctx->seen |= SHMEM_SEEN_INODES; 3395 break; 3396 case Opt_mode: 3397 ctx->mode = result.uint_32 & 07777; 3398 break; 3399 case Opt_uid: 3400 ctx->uid = make_kuid(current_user_ns(), result.uint_32); 3401 if (!uid_valid(ctx->uid)) 3402 goto bad_value; 3403 break; 3404 case Opt_gid: 3405 ctx->gid = make_kgid(current_user_ns(), result.uint_32); 3406 if (!gid_valid(ctx->gid)) 3407 goto bad_value; 3408 break; 3409 case Opt_huge: 3410 ctx->huge = result.uint_32; 3411 if (ctx->huge != SHMEM_HUGE_NEVER && 3412 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 3413 has_transparent_hugepage())) 3414 goto unsupported_parameter; 3415 ctx->seen |= SHMEM_SEEN_HUGE; 3416 break; 3417 case Opt_mpol: 3418 if (IS_ENABLED(CONFIG_NUMA)) { 3419 mpol_put(ctx->mpol); 3420 ctx->mpol = NULL; 3421 if (mpol_parse_str(param->string, &ctx->mpol)) 3422 goto bad_value; 3423 break; 3424 } 3425 goto unsupported_parameter; 3426 case Opt_inode32: 3427 ctx->full_inums = false; 3428 ctx->seen |= SHMEM_SEEN_INUMS; 3429 break; 3430 case Opt_inode64: 3431 if (sizeof(ino_t) < 8) { 3432 return invalfc(fc, 3433 "Cannot use inode64 with <64bit inums in kernel\n"); 3434 } 3435 ctx->full_inums = true; 3436 ctx->seen |= SHMEM_SEEN_INUMS; 3437 break; 3438 } 3439 return 0; 3440 3441 unsupported_parameter: 3442 return invalfc(fc, "Unsupported parameter '%s'", param->key); 3443 bad_value: 3444 return invalfc(fc, "Bad value for '%s'", param->key); 3445 } 3446 3447 static int shmem_parse_options(struct fs_context *fc, void *data) 3448 { 3449 char *options = data; 3450 3451 if (options) { 3452 int err = security_sb_eat_lsm_opts(options, &fc->security); 3453 if (err) 3454 return err; 3455 } 3456 3457 while (options != NULL) { 3458 char *this_char = options; 3459 for (;;) { 3460 /* 3461 * NUL-terminate this option: unfortunately, 3462 * mount options form a comma-separated list, 3463 * but mpol's nodelist may also contain commas. 3464 */ 3465 options = strchr(options, ','); 3466 if (options == NULL) 3467 break; 3468 options++; 3469 if (!isdigit(*options)) { 3470 options[-1] = '\0'; 3471 break; 3472 } 3473 } 3474 if (*this_char) { 3475 char *value = strchr(this_char, '='); 3476 size_t len = 0; 3477 int err; 3478 3479 if (value) { 3480 *value++ = '\0'; 3481 len = strlen(value); 3482 } 3483 err = vfs_parse_fs_string(fc, this_char, value, len); 3484 if (err < 0) 3485 return err; 3486 } 3487 } 3488 return 0; 3489 } 3490 3491 /* 3492 * Reconfigure a shmem filesystem. 3493 * 3494 * Note that we disallow change from limited->unlimited blocks/inodes while any 3495 * are in use; but we must separately disallow unlimited->limited, because in 3496 * that case we have no record of how much is already in use. 
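 *
 * A hedged example via mount(2) (mount point and sizes illustrative):
 *
 *	mount("tmpfs", "/mnt/t", "tmpfs", 0,
 *	      "size=1g,nr_inodes=10k,mode=700");
 *	mount("tmpfs", "/mnt/t", "tmpfs", MS_REMOUNT, "size=2g");
 *
 * Growing the limit as above is fine; shrinking it below current usage,
 * or imposing a limit on an instance mounted without one, is refused
 * with the errors below.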
3497 */ 3498 static int shmem_reconfigure(struct fs_context *fc) 3499 { 3500 struct shmem_options *ctx = fc->fs_private; 3501 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 3502 unsigned long inodes; 3503 const char *err; 3504 3505 spin_lock(&sbinfo->stat_lock); 3506 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 3507 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 3508 if (!sbinfo->max_blocks) { 3509 err = "Cannot retroactively limit size"; 3510 goto out; 3511 } 3512 if (percpu_counter_compare(&sbinfo->used_blocks, 3513 ctx->blocks) > 0) { 3514 err = "Too small a size for current use"; 3515 goto out; 3516 } 3517 } 3518 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 3519 if (!sbinfo->max_inodes) { 3520 err = "Cannot retroactively limit inodes"; 3521 goto out; 3522 } 3523 if (ctx->inodes < inodes) { 3524 err = "Too few inodes for current use"; 3525 goto out; 3526 } 3527 } 3528 3529 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 3530 sbinfo->next_ino > UINT_MAX) { 3531 err = "Current inum too high to switch to 32-bit inums"; 3532 goto out; 3533 } 3534 3535 if (ctx->seen & SHMEM_SEEN_HUGE) 3536 sbinfo->huge = ctx->huge; 3537 if (ctx->seen & SHMEM_SEEN_INUMS) 3538 sbinfo->full_inums = ctx->full_inums; 3539 if (ctx->seen & SHMEM_SEEN_BLOCKS) 3540 sbinfo->max_blocks = ctx->blocks; 3541 if (ctx->seen & SHMEM_SEEN_INODES) { 3542 sbinfo->max_inodes = ctx->inodes; 3543 sbinfo->free_inodes = ctx->inodes - inodes; 3544 } 3545 3546 /* 3547 * Preserve previous mempolicy unless mpol remount option was specified. 3548 */ 3549 if (ctx->mpol) { 3550 mpol_put(sbinfo->mpol); 3551 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 3552 ctx->mpol = NULL; 3553 } 3554 spin_unlock(&sbinfo->stat_lock); 3555 return 0; 3556 out: 3557 spin_unlock(&sbinfo->stat_lock); 3558 return invalfc(fc, "%s", err); 3559 } 3560 3561 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3562 { 3563 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3564 3565 if (sbinfo->max_blocks != shmem_default_max_blocks()) 3566 seq_printf(seq, ",size=%luk", 3567 sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3568 if (sbinfo->max_inodes != shmem_default_max_inodes()) 3569 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 3570 if (sbinfo->mode != (0777 | S_ISVTX)) 3571 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 3572 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 3573 seq_printf(seq, ",uid=%u", 3574 from_kuid_munged(&init_user_ns, sbinfo->uid)); 3575 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 3576 seq_printf(seq, ",gid=%u", 3577 from_kgid_munged(&init_user_ns, sbinfo->gid)); 3578 3579 /* 3580 * Showing inode{64,32} might be useful even if it's the system default, 3581 * since then people don't have to resort to checking both here and 3582 * /proc/config.gz to confirm 64-bit inums were successfully applied 3583 * (which may not even exist if IKCONFIG_PROC isn't enabled). 3584 * 3585 * We hide it when inode64 isn't the default and we are using 32-bit 3586 * inodes, since that probably just means the feature isn't even under 3587 * consideration. 
3588 * 3589 * As such: 3590 * 3591 * +-----------------+-----------------+ 3592 * | TMPFS_INODE64=y | TMPFS_INODE64=n | 3593 * +------------------+-----------------+-----------------+ 3594 * | full_inums=true | show | show | 3595 * | full_inums=false | show | hide | 3596 * +------------------+-----------------+-----------------+ 3597 * 3598 */ 3599 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) 3600 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); 3601 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3602 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 3603 if (sbinfo->huge) 3604 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 3605 #endif 3606 shmem_show_mpol(seq, sbinfo->mpol); 3607 return 0; 3608 } 3609 3610 #endif /* CONFIG_TMPFS */ 3611 3612 static void shmem_put_super(struct super_block *sb) 3613 { 3614 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3615 3616 free_percpu(sbinfo->ino_batch); 3617 percpu_counter_destroy(&sbinfo->used_blocks); 3618 mpol_put(sbinfo->mpol); 3619 kfree(sbinfo); 3620 sb->s_fs_info = NULL; 3621 } 3622 3623 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 3624 { 3625 struct shmem_options *ctx = fc->fs_private; 3626 struct inode *inode; 3627 struct shmem_sb_info *sbinfo; 3628 int err = -ENOMEM; 3629 3630 /* Round up to L1_CACHE_BYTES to resist false sharing */ 3631 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3632 L1_CACHE_BYTES), GFP_KERNEL); 3633 if (!sbinfo) 3634 return -ENOMEM; 3635 3636 sb->s_fs_info = sbinfo; 3637 3638 #ifdef CONFIG_TMPFS 3639 /* 3640 * Per default we only allow half of the physical ram per 3641 * tmpfs instance, limiting inodes to one per page of lowmem; 3642 * but the internal instance is left unlimited. 3643 */ 3644 if (!(sb->s_flags & SB_KERNMOUNT)) { 3645 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 3646 ctx->blocks = shmem_default_max_blocks(); 3647 if (!(ctx->seen & SHMEM_SEEN_INODES)) 3648 ctx->inodes = shmem_default_max_inodes(); 3649 if (!(ctx->seen & SHMEM_SEEN_INUMS)) 3650 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 3651 } else { 3652 sb->s_flags |= SB_NOUSER; 3653 } 3654 sb->s_export_op = &shmem_export_ops; 3655 sb->s_flags |= SB_NOSEC; 3656 #else 3657 sb->s_flags |= SB_NOUSER; 3658 #endif 3659 sbinfo->max_blocks = ctx->blocks; 3660 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 3661 if (sb->s_flags & SB_KERNMOUNT) { 3662 sbinfo->ino_batch = alloc_percpu(ino_t); 3663 if (!sbinfo->ino_batch) 3664 goto failed; 3665 } 3666 sbinfo->uid = ctx->uid; 3667 sbinfo->gid = ctx->gid; 3668 sbinfo->full_inums = ctx->full_inums; 3669 sbinfo->mode = ctx->mode; 3670 sbinfo->huge = ctx->huge; 3671 sbinfo->mpol = ctx->mpol; 3672 ctx->mpol = NULL; 3673 3674 spin_lock_init(&sbinfo->stat_lock); 3675 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3676 goto failed; 3677 spin_lock_init(&sbinfo->shrinklist_lock); 3678 INIT_LIST_HEAD(&sbinfo->shrinklist); 3679 3680 sb->s_maxbytes = MAX_LFS_FILESIZE; 3681 sb->s_blocksize = PAGE_SIZE; 3682 sb->s_blocksize_bits = PAGE_SHIFT; 3683 sb->s_magic = TMPFS_MAGIC; 3684 sb->s_op = &shmem_ops; 3685 sb->s_time_gran = 1; 3686 #ifdef CONFIG_TMPFS_XATTR 3687 sb->s_xattr = shmem_xattr_handlers; 3688 #endif 3689 #ifdef CONFIG_TMPFS_POSIX_ACL 3690 sb->s_flags |= SB_POSIXACL; 3691 #endif 3692 uuid_gen(&sb->s_uuid); 3693 3694 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 3695 if (!inode) 3696 goto failed; 3697 inode->i_uid = sbinfo->uid; 3698 inode->i_gid = sbinfo->gid; 3699 sb->s_root = 
d_make_root(inode); 3700 if (!sb->s_root) 3701 goto failed; 3702 return 0; 3703 3704 failed: 3705 shmem_put_super(sb); 3706 return err; 3707 } 3708 3709 static int shmem_get_tree(struct fs_context *fc) 3710 { 3711 return get_tree_nodev(fc, shmem_fill_super); 3712 } 3713 3714 static void shmem_free_fc(struct fs_context *fc) 3715 { 3716 struct shmem_options *ctx = fc->fs_private; 3717 3718 if (ctx) { 3719 mpol_put(ctx->mpol); 3720 kfree(ctx); 3721 } 3722 } 3723 3724 static const struct fs_context_operations shmem_fs_context_ops = { 3725 .free = shmem_free_fc, 3726 .get_tree = shmem_get_tree, 3727 #ifdef CONFIG_TMPFS 3728 .parse_monolithic = shmem_parse_options, 3729 .parse_param = shmem_parse_one, 3730 .reconfigure = shmem_reconfigure, 3731 #endif 3732 }; 3733 3734 static struct kmem_cache *shmem_inode_cachep; 3735 3736 static struct inode *shmem_alloc_inode(struct super_block *sb) 3737 { 3738 struct shmem_inode_info *info; 3739 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 3740 if (!info) 3741 return NULL; 3742 return &info->vfs_inode; 3743 } 3744 3745 static void shmem_free_in_core_inode(struct inode *inode) 3746 { 3747 if (S_ISLNK(inode->i_mode)) 3748 kfree(inode->i_link); 3749 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3750 } 3751 3752 static void shmem_destroy_inode(struct inode *inode) 3753 { 3754 if (S_ISREG(inode->i_mode)) 3755 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 3756 } 3757 3758 static void shmem_init_inode(void *foo) 3759 { 3760 struct shmem_inode_info *info = foo; 3761 inode_init_once(&info->vfs_inode); 3762 } 3763 3764 static void shmem_init_inodecache(void) 3765 { 3766 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 3767 sizeof(struct shmem_inode_info), 3768 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 3769 } 3770 3771 static void shmem_destroy_inodecache(void) 3772 { 3773 kmem_cache_destroy(shmem_inode_cachep); 3774 } 3775 3776 const struct address_space_operations shmem_aops = { 3777 .writepage = shmem_writepage, 3778 .set_page_dirty = __set_page_dirty_no_writeback, 3779 #ifdef CONFIG_TMPFS 3780 .write_begin = shmem_write_begin, 3781 .write_end = shmem_write_end, 3782 #endif 3783 #ifdef CONFIG_MIGRATION 3784 .migratepage = migrate_page, 3785 #endif 3786 .error_remove_page = generic_error_remove_page, 3787 }; 3788 EXPORT_SYMBOL(shmem_aops); 3789 3790 static const struct file_operations shmem_file_operations = { 3791 .mmap = shmem_mmap, 3792 .get_unmapped_area = shmem_get_unmapped_area, 3793 #ifdef CONFIG_TMPFS 3794 .llseek = shmem_file_llseek, 3795 .read_iter = shmem_file_read_iter, 3796 .write_iter = generic_file_write_iter, 3797 .fsync = noop_fsync, 3798 .splice_read = generic_file_splice_read, 3799 .splice_write = iter_file_splice_write, 3800 .fallocate = shmem_fallocate, 3801 #endif 3802 }; 3803 3804 static const struct inode_operations shmem_inode_operations = { 3805 .getattr = shmem_getattr, 3806 .setattr = shmem_setattr, 3807 #ifdef CONFIG_TMPFS_XATTR 3808 .listxattr = shmem_listxattr, 3809 .set_acl = simple_set_acl, 3810 #endif 3811 }; 3812 3813 static const struct inode_operations shmem_dir_inode_operations = { 3814 #ifdef CONFIG_TMPFS 3815 .create = shmem_create, 3816 .lookup = simple_lookup, 3817 .link = shmem_link, 3818 .unlink = shmem_unlink, 3819 .symlink = shmem_symlink, 3820 .mkdir = shmem_mkdir, 3821 .rmdir = shmem_rmdir, 3822 .mknod = shmem_mknod, 3823 .rename = shmem_rename2, 3824 .tmpfile = shmem_tmpfile, 3825 #endif 3826 #ifdef CONFIG_TMPFS_XATTR 3827 .listxattr = shmem_listxattr, 3828 #endif 3829 #ifdef 
CONFIG_TMPFS_POSIX_ACL 3830 .setattr = shmem_setattr, 3831 .set_acl = simple_set_acl, 3832 #endif 3833 }; 3834 3835 static const struct inode_operations shmem_special_inode_operations = { 3836 #ifdef CONFIG_TMPFS_XATTR 3837 .listxattr = shmem_listxattr, 3838 #endif 3839 #ifdef CONFIG_TMPFS_POSIX_ACL 3840 .setattr = shmem_setattr, 3841 .set_acl = simple_set_acl, 3842 #endif 3843 }; 3844 3845 static const struct super_operations shmem_ops = { 3846 .alloc_inode = shmem_alloc_inode, 3847 .free_inode = shmem_free_in_core_inode, 3848 .destroy_inode = shmem_destroy_inode, 3849 #ifdef CONFIG_TMPFS 3850 .statfs = shmem_statfs, 3851 .show_options = shmem_show_options, 3852 #endif 3853 .evict_inode = shmem_evict_inode, 3854 .drop_inode = generic_delete_inode, 3855 .put_super = shmem_put_super, 3856 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3857 .nr_cached_objects = shmem_unused_huge_count, 3858 .free_cached_objects = shmem_unused_huge_scan, 3859 #endif 3860 }; 3861 3862 static const struct vm_operations_struct shmem_vm_ops = { 3863 .fault = shmem_fault, 3864 .map_pages = filemap_map_pages, 3865 #ifdef CONFIG_NUMA 3866 .set_policy = shmem_set_policy, 3867 .get_policy = shmem_get_policy, 3868 #endif 3869 }; 3870 3871 int shmem_init_fs_context(struct fs_context *fc) 3872 { 3873 struct shmem_options *ctx; 3874 3875 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 3876 if (!ctx) 3877 return -ENOMEM; 3878 3879 ctx->mode = 0777 | S_ISVTX; 3880 ctx->uid = current_fsuid(); 3881 ctx->gid = current_fsgid(); 3882 3883 fc->fs_private = ctx; 3884 fc->ops = &shmem_fs_context_ops; 3885 return 0; 3886 } 3887 3888 static struct file_system_type shmem_fs_type = { 3889 .owner = THIS_MODULE, 3890 .name = "tmpfs", 3891 .init_fs_context = shmem_init_fs_context, 3892 #ifdef CONFIG_TMPFS 3893 .parameters = shmem_fs_parameters, 3894 #endif 3895 .kill_sb = kill_litter_super, 3896 .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT, 3897 }; 3898 3899 int __init shmem_init(void) 3900 { 3901 int error; 3902 3903 shmem_init_inodecache(); 3904 3905 error = register_filesystem(&shmem_fs_type); 3906 if (error) { 3907 pr_err("Could not register tmpfs\n"); 3908 goto out2; 3909 } 3910 3911 shm_mnt = kern_mount(&shmem_fs_type); 3912 if (IS_ERR(shm_mnt)) { 3913 error = PTR_ERR(shm_mnt); 3914 pr_err("Could not kern_mount tmpfs\n"); 3915 goto out1; 3916 } 3917 3918 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3919 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 3920 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 3921 else 3922 shmem_huge = 0; /* just in case it was patched */ 3923 #endif 3924 return 0; 3925 3926 out1: 3927 unregister_filesystem(&shmem_fs_type); 3928 out2: 3929 shmem_destroy_inodecache(); 3930 shm_mnt = ERR_PTR(error); 3931 return error; 3932 } 3933 3934 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 3935 static ssize_t shmem_enabled_show(struct kobject *kobj, 3936 struct kobj_attribute *attr, char *buf) 3937 { 3938 static const int values[] = { 3939 SHMEM_HUGE_ALWAYS, 3940 SHMEM_HUGE_WITHIN_SIZE, 3941 SHMEM_HUGE_ADVISE, 3942 SHMEM_HUGE_NEVER, 3943 SHMEM_HUGE_DENY, 3944 SHMEM_HUGE_FORCE, 3945 }; 3946 int len = 0; 3947 int i; 3948 3949 for (i = 0; i < ARRAY_SIZE(values); i++) { 3950 len += sysfs_emit_at(buf, len, 3951 shmem_huge == values[i] ? "%s[%s]" : "%s%s", 3952 i ? 
" " : "", 3953 shmem_format_huge(values[i])); 3954 } 3955 3956 len += sysfs_emit_at(buf, len, "\n"); 3957 3958 return len; 3959 } 3960 3961 static ssize_t shmem_enabled_store(struct kobject *kobj, 3962 struct kobj_attribute *attr, const char *buf, size_t count) 3963 { 3964 char tmp[16]; 3965 int huge; 3966 3967 if (count + 1 > sizeof(tmp)) 3968 return -EINVAL; 3969 memcpy(tmp, buf, count); 3970 tmp[count] = '\0'; 3971 if (count && tmp[count - 1] == '\n') 3972 tmp[count - 1] = '\0'; 3973 3974 huge = shmem_parse_huge(tmp); 3975 if (huge == -EINVAL) 3976 return -EINVAL; 3977 if (!has_transparent_hugepage() && 3978 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 3979 return -EINVAL; 3980 3981 shmem_huge = huge; 3982 if (shmem_huge > SHMEM_HUGE_DENY) 3983 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 3984 return count; 3985 } 3986 3987 struct kobj_attribute shmem_enabled_attr = 3988 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 3989 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ 3990 3991 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3992 bool shmem_huge_enabled(struct vm_area_struct *vma) 3993 { 3994 struct inode *inode = file_inode(vma->vm_file); 3995 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3996 loff_t i_size; 3997 pgoff_t off; 3998 3999 if (!transhuge_vma_enabled(vma, vma->vm_flags)) 4000 return false; 4001 if (shmem_huge == SHMEM_HUGE_FORCE) 4002 return true; 4003 if (shmem_huge == SHMEM_HUGE_DENY) 4004 return false; 4005 switch (sbinfo->huge) { 4006 case SHMEM_HUGE_NEVER: 4007 return false; 4008 case SHMEM_HUGE_ALWAYS: 4009 return true; 4010 case SHMEM_HUGE_WITHIN_SIZE: 4011 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 4012 i_size = round_up(i_size_read(inode), PAGE_SIZE); 4013 if (i_size >= HPAGE_PMD_SIZE && 4014 i_size >> PAGE_SHIFT >= off) 4015 return true; 4016 fallthrough; 4017 case SHMEM_HUGE_ADVISE: 4018 /* TODO: implement fadvise() hints */ 4019 return (vma->vm_flags & VM_HUGEPAGE); 4020 default: 4021 VM_BUG_ON(1); 4022 return false; 4023 } 4024 } 4025 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4026 4027 #else /* !CONFIG_SHMEM */ 4028 4029 /* 4030 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 4031 * 4032 * This is intended for small system where the benefits of the full 4033 * shmem code (swap-backed and resource-limited) are outweighed by 4034 * their complexity. On systems without swap this code should be 4035 * effectively equivalent, but much lighter weight. 

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
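 *
 * A minimal usage sketch (illustrative only: the name shown and the length
 * variable are assumptions, and the key/shm level LSM checks remain the
 * caller's responsibility):
 *
 *	file = shmem_kernel_file_setup("kernel-blob", blob_size, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	(then read/write via kernel_read()/kernel_write(), fput() when done)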
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method, which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
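 *
 * A rough usage sketch (illustrative only; the local variable names are
 * assumptions, not taken from any particular driver):
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index,
 *					   gfp | __GFP_NORETRY | __GFP_NOWARN);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);		(drop the reference when done with the page)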
4217 */ 4218 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 4219 pgoff_t index, gfp_t gfp) 4220 { 4221 #ifdef CONFIG_SHMEM 4222 struct inode *inode = mapping->host; 4223 struct page *page; 4224 int error; 4225 4226 BUG_ON(!shmem_mapping(mapping)); 4227 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, 4228 gfp, NULL, NULL, NULL); 4229 if (error) 4230 page = ERR_PTR(error); 4231 else 4232 unlock_page(page); 4233 return page; 4234 #else 4235 /* 4236 * The tiny !SHMEM case uses ramfs without swap 4237 */ 4238 return read_cache_page_gfp(mapping, index, gfp); 4239 #endif 4240 } 4241 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 4242