/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
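
/*
 * Illustrative arithmetic, not part of the original source: assuming the
 * common PAGE_SIZE of 4096, BLOCKS_PER_PAGE is 4096/512 = 8, i.e. each
 * resident page shows up as eight 512-byte blocks in i_blocks; and VM_ACCT
 * rounds a byte count up to whole pages before charging the commit limit,
 * so VM_ACCT(5000) = PAGE_ALIGN(5000) >> PAGE_SHIFT = 8192 >> 12 = 2 pages.
 */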

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};

struct shmem_options {
        unsigned long long blocks;
        unsigned long long inodes;
        struct mempolicy *mpol;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        bool full_inums;
        int huge;
        int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        unsigned long nr_pages = totalram_pages();

        return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                             struct page **pagep, enum sgp_type sgp,
                             gfp_t gfp, struct vm_area_struct *vma,
                             vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp,
                gfp_t gfp, struct vm_area_struct *vma,
                struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}
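
/*
 * Worked example for shmem_reacct_size, illustrative and assuming 4K pages:
 * growing a pre-accounted object from 10000 bytes (3 pages) to 20000 bytes
 * (5 pages) charges the 2-page difference, and may fail with -ENOMEM under
 * strict overcommit; shrinking back returns those 2 pages; a resize within
 * the same page count (say 10000 -> 12000 bytes) charges nothing either way.
 */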

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (shmem_acct_block(info->flags, pages))
                return false;

        if (sbinfo->max_blocks) {
                if (percpu_counter_compare(&sbinfo->used_blocks,
                                           sbinfo->max_blocks - pages) > 0)
                        goto unacct;
                percpu_counter_add(&sbinfo->used_blocks, pages);
        }

        return true;

unacct:
        shmem_unacct_blocks(info->flags, pages);
        return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
}
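
/*
 * Illustrative numbers for the max_blocks check in shmem_inode_acct_block
 * (not from the original source): with max_blocks = 100 and used_blocks =
 * 98, a request for pages = 4 compares used_blocks against 100 - 4 = 96;
 * 98 > 96, so the charge is undone and the caller ends up returning
 * -ENOSPC.  A request for pages = 2 passes (98 is not greater than 98)
 * and raises used_blocks to exactly the 100-block limit.
 */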

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
        return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        ino_t ino;

        if (!(sb->s_flags & SB_KERNMOUNT)) {
                spin_lock(&sbinfo->stat_lock);
                if (sbinfo->max_inodes) {
                        if (!sbinfo->free_inodes) {
                                spin_unlock(&sbinfo->stat_lock);
                                return -ENOSPC;
                        }
                        sbinfo->free_inodes--;
                }
                if (inop) {
                        ino = sbinfo->next_ino++;
                        if (unlikely(is_zero_ino(ino)))
                                ino = sbinfo->next_ino++;
                        if (unlikely(!sbinfo->full_inums &&
                                     ino > UINT_MAX)) {
                                /*
                                 * Emulate get_next_ino uint wraparound for
                                 * compatibility
                                 */
                                if (IS_ENABLED(CONFIG_64BIT))
                                        pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
                                                __func__, MINOR(sb->s_dev));
                                sbinfo->next_ino = 1;
                                ino = sbinfo->next_ino++;
                        }
                        *inop = ino;
                }
                spin_unlock(&sbinfo->stat_lock);
        } else if (inop) {
                /*
                 * __shmem_file_setup, one of our callers, is lock-free: it
                 * doesn't hold stat_lock in shmem_reserve_inode since
                 * max_inodes is always 0, and is called from potentially
                 * unknown contexts. As such, use a per-cpu batched allocator
                 * which doesn't require the per-sb stat_lock unless we are at
                 * the batch boundary.
                 *
                 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
                 * shmem mounts are not exposed to userspace, so we don't need
                 * to worry about things like glibc compatibility.
                 */
                ino_t *next_ino;
                next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
                ino = *next_ino;
                if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
                        spin_lock(&sbinfo->stat_lock);
                        ino = sbinfo->next_ino;
                        sbinfo->next_ino += SHMEM_INO_BATCH;
                        spin_unlock(&sbinfo->stat_lock);
                        if (unlikely(is_zero_ino(ino)))
                                ino++;
                }
                *inop = ino;
                *next_ino = ++ino;
                put_cpu();
        }

        return 0;
}
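
/*
 * Illustrative walk-through of the SB_KERNMOUNT path above (not from the
 * original source): whenever a CPU's cursor *next_ino sits on a multiple
 * of SHMEM_INO_BATCH (1024), that CPU takes stat_lock once and claims the
 * next block of 1024 inode numbers; the following 1023 allocations on the
 * same CPU are served from the per-cpu cursor with no locking, so the
 * per-sb lock is touched on roughly 0.1% of allocations.
 */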

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_inode_unacct_blocks(inode, freed);
        }
}
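
/*
 * Illustrative numbers for the invariant documented above (not from the
 * original source): an inode with info->alloced = 10, 6 resident pages
 * (nrpages) and 4 swapped out frees nothing, since 10 - 4 - 6 = 0.  If
 * reclaim then drops 2 clean hole pages so nrpages falls to 4, freed
 * becomes 10 - 4 - 4 = 2, and shmem_recalc_inode() hands those 2 pages
 * back to i_blocks and the block accounting.
 */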

bool shmem_charge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        if (!shmem_inode_acct_block(inode, pages))
                return false;

        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
        inode->i_mapping->nrpages += pages;

        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        /* nrpages adjustment done by __delete_from_page_cache() or caller */

        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        item = xas_load(&xas);
        if (item != expected)
                return -ENOENT;
        xas_store(&xas, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *      disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *      enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *      only allocate huge pages if the page will be fully within i_size,
 *      also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *      only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER        0
#define SHMEM_HUGE_ALWAYS       1
#define SHMEM_HUGE_WITHIN_SIZE  2
#define SHMEM_HUGE_ADVISE       3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *      disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY         (-1)
#define SHMEM_HUGE_FORCE        (-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}
#endif
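
/*
 * Example usage (illustrative; huge= is the standard tmpfs mount option
 * parsed into the values above):
 *
 *      mount -t tmpfs -o huge=within_size tmpfs /mnt
 *
 * whereas the DENY/FORCE overrides are only reachable through
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.
 */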

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int removed = 0, split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_move(&info->shrinklist, &to_remove);
                        removed++;
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &to_remove) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;
                list_del_init(&info->shrinklist);
                iput(inode);
        }

        list_for_each_safe(pos, next, &list) {
                int ret;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split)
                        goto leave;

                page = find_get_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;

                /* No huge page at the end of the file: nothing to split */
                if (!PageTransHuge(page)) {
                        put_page(page);
                        goto drop;
                }

                /*
                 * Leave the inode on the list if we failed to lock
                 * the page at this time.
                 *
                 * Waiting for the lock may lead to deadlock in the
                 * reclaim path.
                 */
                if (!trylock_page(page)) {
                        put_page(page);
                        goto leave;
                }

                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);

                /* If split failed leave the inode on the list */
                if (ret)
                        goto leave;

                split++;
drop:
                list_del_init(&info->shrinklist);
                removed++;
leave:
                iput(inode);
        }

        spin_lock(&sbinfo->shrinklist_lock);
        list_splice_tail(&list, &sbinfo->shrinklist);
        sbinfo->shrinklist_len -= removed;
        spin_unlock(&sbinfo->shrinklist_lock);

        return split;
}
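
/*
 * Illustrative numbers for the "anything to gain" check above (assuming
 * x86-64: PAGE_SIZE 4K, HPAGE_PMD_SIZE 2M): an inode with i_size = 3M
 * rounds to 3M by page but 4M by huge page, so up to 1M of its tail huge
 * page lies beyond i_size and a split can give memory back.  With
 * i_size = 4M both round-ups agree at 4M, and the inode is simply
 * dropped from the shrink list.
 */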

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
            shmem_huge != SHMEM_HUGE_DENY)
                return true;
        return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected, gfp_t gfp,
                                   struct mm_struct *charge_mm)
{
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
        unsigned long nr = compound_nr(page);
        int error;

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON(expected && PageTransHuge(page));

        page_ref_add(page, nr);
        page->mapping = mapping;
        page->index = index;

        if (!PageSwapCache(page)) {
                error = mem_cgroup_charge(page, charge_mm, gfp);
                if (error) {
                        if (PageTransHuge(page)) {
                                count_vm_event(THP_FILE_FALLBACK);
                                count_vm_event(THP_FILE_FALLBACK_CHARGE);
                        }
                        goto error;
                }
        }
        cgroup_throttle_swaprate(page, gfp);

        do {
                void *entry;
                xas_lock_irq(&xas);
                entry = xas_find_conflict(&xas);
                if (entry != expected)
                        xas_set_err(&xas, -EEXIST);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
next:
                xas_store(&xas, page);
                if (++i < nr) {
                        xas_next(&xas);
                        goto next;
                }
                if (PageTransHuge(page)) {
                        count_vm_event(THP_FILE_ALLOC);
                        __inc_lruvec_page_state(page, NR_SHMEM_THPS);
                }
                mapping->nrpages += nr;
                __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
                __mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (xas_error(&xas)) {
                error = xas_error(&xas);
                goto error;
        }

        return 0;
error:
        page->mapping = NULL;
        page_ref_sub(page, nr);
        return error;
}
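
/*
 * Illustrative sketch of the xarray store above (assuming x86-64 THP,
 * HPAGE_PMD_NR = 512): adding a huge page at index 512 walks slots
 * 512..1023 and stores the same head page in every one, so a lookup of
 * any subpage index resolves to the compound page.  The round_down()
 * VM_BUG_ON_PAGE enforces that such a page is naturally aligned, e.g.
 * index 520 would be rejected.
 */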

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        VM_BUG_ON_PAGE(PageCompound(page), page);

        xa_lock_irq(&mapping->i_pages);
        error = shmem_replace_entry(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_lruvec_page_state(page, NR_FILE_PAGES);
        __dec_lruvec_page_state(page, NR_SHMEM);
        xa_unlock_irq(&mapping->i_pages);
        put_page(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        unsigned long swapped = 0;

        rcu_read_lock();
        xas_for_each(&xas, page, end - 1) {
                if (xas_retry(&xas, page))
                        continue;
                if (xa_is_value(page))
                        swapped++;

                if (need_resched()) {
                        xas_pause(&xas);
                        cond_resched_rcu();
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping,
                        linear_page_index(vma, vma->vm_start),
                        linear_page_index(vma, vma->vm_end));
}
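
/*
 * Illustrative case for shmem_swap_usage() (assuming 4K pages): a vma with
 * vm_pgoff = 0 that covers a whole 1M object takes the fast path and just
 * scales the cached count, e.g. 3 swapped pages -> 3 << 12 = 12288 bytes.
 * A vma mapping only the second half of the object instead walks the
 * xarray via shmem_partial_swap_usage() over page offsets 128..255.
 */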

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(&pvec);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
        if (!PageTransCompound(page))
                return true;

        /* Just proceed to delete a huge page wholly within the range punched */
        if (PageHead(page) &&
            page->index >= start && page->index + HPAGE_PMD_NR <= end)
                return true;

        /* Try to split huge page, so we can truly punch the hole or truncate */
        return split_huge_page(page) >= 0;
}
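
/*
 * Illustrative cases for shmem_punch_compound() (assuming HPAGE_PMD_NR =
 * 512): punching page offsets 512..1535 fully covers a huge page whose
 * head sits at index 512 (512 + 512 <= 1536), so it is truncated whole
 * with no split; punching 600..700 lands inside that huge page, so a
 * split is attempted, and on failure the caller falls back to zeroing
 * the affected subpages instead of freeing them.
 */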

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        pagevec_init(&pvec);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

                        if (!trylock_page(page))
                                continue;

                        if ((!unfalloc || !PageUptodate(page)) &&
                            page_mapping(page) == mapping) {
                                VM_BUG_ON_PAGE(PageWriteback(page), page);
                                if (shmem_punch_compound(page, start, end))
                                        truncate_inode_page(mapping, page);
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);

                        if (!unfalloc || !PageUptodate(page)) {
                                if (page_mapping(page) != mapping) {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                                VM_BUG_ON_PAGE(PageWriteback(page), page);
                                if (shmem_punch_compound(page, start, end))
                                        truncate_inode_page(mapping, page);
                                else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                                        /* Wipe the page and don't get stuck */
                                        clear_highpage(page);
                                        flush_dcache_page(page);
                                        set_page_dirty(page);
                                        if (index <
                                            round_up(start, HPAGE_PMD_NR))
                                                start = index + 1;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock_irq(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
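
/*
 * Worked example of the lstart/lend arithmetic in shmem_undo_range()
 * (illustrative, assuming 4K pages): punching bytes 1000..8191 gives
 * start = (1000 + 4095) >> 12 = 1, end = (8191 + 1) >> 12 = 2,
 * partial_start = 1000 and partial_end = 0; page 1 is removed outright,
 * while page 0 is only zeroed from byte 1000 to the end of the page,
 * since the hole begins mid-page.
 */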
Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 10901da177e4SLinus Torvalds int error; 10911da177e4SLinus Torvalds 10922f221d6fSChristian Brauner error = setattr_prepare(&init_user_ns, dentry, attr); 1093db78b877SChristoph Hellwig if (error) 1094db78b877SChristoph Hellwig return error; 1095db78b877SChristoph Hellwig 109694c1e62dSHugh Dickins if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 109794c1e62dSHugh Dickins loff_t oldsize = inode->i_size; 109894c1e62dSHugh Dickins loff_t newsize = attr->ia_size; 10993889e6e7Snpiggin@suse.de 110040e041a2SDavid Herrmann /* protected by i_mutex */ 110140e041a2SDavid Herrmann if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 110240e041a2SDavid Herrmann (newsize > oldsize && (info->seals & F_SEAL_GROW))) 110340e041a2SDavid Herrmann return -EPERM; 110440e041a2SDavid Herrmann 110594c1e62dSHugh Dickins if (newsize != oldsize) { 110677142517SKonstantin Khlebnikov error = shmem_reacct_size(SHMEM_I(inode)->flags, 110777142517SKonstantin Khlebnikov oldsize, newsize); 110877142517SKonstantin Khlebnikov if (error) 110977142517SKonstantin Khlebnikov return error; 111094c1e62dSHugh Dickins i_size_write(inode, newsize); 1111078cd827SDeepa Dinamani inode->i_ctime = inode->i_mtime = current_time(inode); 111294c1e62dSHugh Dickins } 1113afa2db2fSJosef Bacik if (newsize <= oldsize) { 111494c1e62dSHugh Dickins loff_t holebegin = round_up(newsize, PAGE_SIZE); 1115d0424c42SHugh Dickins if (oldsize > holebegin) 1116d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1117d0424c42SHugh Dickins holebegin, 0, 1); 1118d0424c42SHugh Dickins if (info->alloced) 1119d0424c42SHugh Dickins shmem_truncate_range(inode, 1120d0424c42SHugh Dickins newsize, (loff_t)-1); 112194c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */ 1122d0424c42SHugh Dickins if (oldsize > holebegin) 1123d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1124d0424c42SHugh Dickins holebegin, 0, 1); 1125779750d2SKirill A. Shutemov 1126779750d2SKirill A. Shutemov /* 1127779750d2SKirill A. Shutemov * Part of the huge page can be beyond i_size: subject 1128779750d2SKirill A. Shutemov * to shrink under memory pressure. 1129779750d2SKirill A. Shutemov */ 1130396bcc52SMatthew Wilcox (Oracle) if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1131779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1132d041353dSCong Wang /* 1133d041353dSCong Wang * _careful to defend against unlocked access to 1134d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1135d041353dSCong Wang */ 1136d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1137779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1138779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1139779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1140779750d2SKirill A. Shutemov } 1141779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1142779750d2SKirill A. 
Shutemov } 114394c1e62dSHugh Dickins } 11441da177e4SLinus Torvalds } 11451da177e4SLinus Torvalds 11462f221d6fSChristian Brauner setattr_copy(&init_user_ns, inode, attr); 1147db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE) 1148e65ce2a5SChristian Brauner error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); 11491da177e4SLinus Torvalds return error; 11501da177e4SLinus Torvalds } 11511da177e4SLinus Torvalds 11521f895f75SAl Viro static void shmem_evict_inode(struct inode *inode) 11531da177e4SLinus Torvalds { 11541da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 1155779750d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 11561da177e4SLinus Torvalds 115730e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) { 11581da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size); 11591da177e4SLinus Torvalds inode->i_size = 0; 11603889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1); 1161779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1162779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1163779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1164779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 1165779750d2SKirill A. Shutemov sbinfo->shrinklist_len--; 1166779750d2SKirill A. Shutemov } 1167779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1168779750d2SKirill A. Shutemov } 1169af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) { 1170af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... */ 1171af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction, 1172af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction)); 1173cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1174af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */ 1175af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction)) 11761da177e4SLinus Torvalds list_del_init(&info->swaplist); 1177cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 11781da177e4SLinus Torvalds } 11793ed47db3SAl Viro } 1180b09e0fa4SEric Paris 118138f38657SAristeu Rozanski simple_xattrs_free(&info->xattrs); 11820f3c42f5SHugh Dickins WARN_ON(inode->i_blocks); 11835b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 1184dbd5768fSJan Kara clear_inode(inode); 11851da177e4SLinus Torvalds } 11861da177e4SLinus Torvalds 1187b56a2d8aSVineeth Remanan Pillai extern struct swap_info_struct *swap_info[]; 1188b56a2d8aSVineeth Remanan Pillai 1189b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping, 1190b56a2d8aSVineeth Remanan Pillai pgoff_t start, unsigned int nr_entries, 1191b56a2d8aSVineeth Remanan Pillai struct page **entries, pgoff_t *indices, 119287039546SHugh Dickins unsigned int type, bool frontswap) 1193478922e2SMatthew Wilcox { 1194b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start); 1195b56a2d8aSVineeth Remanan Pillai struct page *page; 119687039546SHugh Dickins swp_entry_t entry; 1197b56a2d8aSVineeth Remanan Pillai unsigned int ret = 0; 1198b56a2d8aSVineeth Remanan Pillai 1199b56a2d8aSVineeth Remanan Pillai if (!nr_entries) 1200b56a2d8aSVineeth Remanan Pillai return 0; 1201478922e2SMatthew Wilcox 1202478922e2SMatthew Wilcox rcu_read_lock(); 1203b56a2d8aSVineeth Remanan Pillai xas_for_each(&xas, page, ULONG_MAX) { 1204b56a2d8aSVineeth Remanan Pillai if (xas_retry(&xas, page)) 12055b9c98f3SMike Kravetz continue; 1206b56a2d8aSVineeth Remanan Pillai 1207b56a2d8aSVineeth Remanan Pillai 
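/*
 * [Editor's aside -- illustrative userspace sketch, not part of shmem.c]
 * The F_SEAL_SHRINK/F_SEAL_GROW tests in shmem_setattr() above are the
 * kernel side of memfd sealing: once both seals are set, any ftruncate()
 * that would change the size fails with EPERM. Minimal demo (glibc 2.27+
 * for memfd_create()):
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, 4096))
                return 1;
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW))
                return 1;

        if (ftruncate(fd, 2048))
                perror("shrink");       /* EPERM: blocked by F_SEAL_SHRINK */
        if (ftruncate(fd, 8192))
                perror("grow");         /* EPERM: blocked by F_SEAL_GROW */
        return 0;
}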
if (!xa_is_value(page)) 1208478922e2SMatthew Wilcox continue; 1209b56a2d8aSVineeth Remanan Pillai 121087039546SHugh Dickins entry = radix_to_swp_entry(page); 121187039546SHugh Dickins if (swp_type(entry) != type) 1212b56a2d8aSVineeth Remanan Pillai continue; 121387039546SHugh Dickins if (frontswap && 121487039546SHugh Dickins !frontswap_test(swap_info[type], swp_offset(entry))) 121587039546SHugh Dickins continue; 1216b56a2d8aSVineeth Remanan Pillai 1217b56a2d8aSVineeth Remanan Pillai indices[ret] = xas.xa_index; 1218b56a2d8aSVineeth Remanan Pillai entries[ret] = page; 1219b56a2d8aSVineeth Remanan Pillai 1220b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1221e21a2955SMatthew Wilcox xas_pause(&xas); 1222478922e2SMatthew Wilcox cond_resched_rcu(); 1223478922e2SMatthew Wilcox } 1224b56a2d8aSVineeth Remanan Pillai if (++ret == nr_entries) 1225b56a2d8aSVineeth Remanan Pillai break; 1226b56a2d8aSVineeth Remanan Pillai } 1227478922e2SMatthew Wilcox rcu_read_unlock(); 1228e21a2955SMatthew Wilcox 1229b56a2d8aSVineeth Remanan Pillai return ret; 1230b56a2d8aSVineeth Remanan Pillai } 1231b56a2d8aSVineeth Remanan Pillai 1232b56a2d8aSVineeth Remanan Pillai /* 1233b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1234b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 1235b56a2d8aSVineeth Remanan Pillai */ 1236b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1237b56a2d8aSVineeth Remanan Pillai pgoff_t *indices) 1238b56a2d8aSVineeth Remanan Pillai { 1239b56a2d8aSVineeth Remanan Pillai int i = 0; 1240b56a2d8aSVineeth Remanan Pillai int ret = 0; 1241b56a2d8aSVineeth Remanan Pillai int error = 0; 1242b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1243b56a2d8aSVineeth Remanan Pillai 1244b56a2d8aSVineeth Remanan Pillai for (i = 0; i < pvec.nr; i++) { 1245b56a2d8aSVineeth Remanan Pillai struct page *page = pvec.pages[i]; 1246b56a2d8aSVineeth Remanan Pillai 1247b56a2d8aSVineeth Remanan Pillai if (!xa_is_value(page)) 1248b56a2d8aSVineeth Remanan Pillai continue; 1249b56a2d8aSVineeth Remanan Pillai error = shmem_swapin_page(inode, indices[i], 1250b56a2d8aSVineeth Remanan Pillai &page, SGP_CACHE, 1251b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1252b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1253b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1254b56a2d8aSVineeth Remanan Pillai unlock_page(page); 1255b56a2d8aSVineeth Remanan Pillai put_page(page); 1256b56a2d8aSVineeth Remanan Pillai ret++; 1257b56a2d8aSVineeth Remanan Pillai } 1258b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1259b56a2d8aSVineeth Remanan Pillai break; 1260b56a2d8aSVineeth Remanan Pillai error = 0; 1261b56a2d8aSVineeth Remanan Pillai } 1262b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1263478922e2SMatthew Wilcox } 1264478922e2SMatthew Wilcox 126546f65ec1SHugh Dickins /* 126646f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type,
                             bool frontswap, unsigned long *fs_pages_to_unuse)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t start = 0;
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
        int ret = 0;

        pagevec_init(&pvec);
        do {
                unsigned int nr_entries = PAGEVEC_SIZE;

                if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
                        nr_entries = *fs_pages_to_unuse;

                pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
                                                  pvec.pages, indices,
                                                  type, frontswap);
                if (pvec.nr == 0) {
                        ret = 0;
                        break;
                }

                ret = shmem_unuse_swap_entries(inode, pvec, indices);
                if (ret < 0)
                        break;

                if (frontswap_partial) {
                        *fs_pages_to_unuse -= ret;
                        if (*fs_pages_to_unuse == 0) {
                                ret = FRONTSWAP_PAGES_UNUSED;
                                break;
                        }
                }

                start = indices[pvec.nr - 1];
        } while (true);

        return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
1315b56a2d8aSVineeth Remanan Pillai */ 1316b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap, 1317b56a2d8aSVineeth Remanan Pillai unsigned long *fs_pages_to_unuse) 1318b56a2d8aSVineeth Remanan Pillai { 1319b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1320b56a2d8aSVineeth Remanan Pillai int error = 0; 1321b56a2d8aSVineeth Remanan Pillai 1322b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1323b56a2d8aSVineeth Remanan Pillai return 0; 1324b56a2d8aSVineeth Remanan Pillai 1325b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1326b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1327b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1328b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1329b56a2d8aSVineeth Remanan Pillai continue; 1330b56a2d8aSVineeth Remanan Pillai } 1331af53d3e9SHugh Dickins /* 1332af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1333af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1334af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1335af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1336af53d3e9SHugh Dickins */ 1337af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1338b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1339b56a2d8aSVineeth Remanan Pillai 1340af53d3e9SHugh Dickins error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, 1341b56a2d8aSVineeth Remanan Pillai fs_pages_to_unuse); 1342b56a2d8aSVineeth Remanan Pillai cond_resched(); 1343b56a2d8aSVineeth Remanan Pillai 1344b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1345b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1346b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1347b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1348af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1349af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1350b56a2d8aSVineeth Remanan Pillai if (error) 1351b56a2d8aSVineeth Remanan Pillai break; 13521da177e4SLinus Torvalds } 1353cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1354778dd893SHugh Dickins 1355778dd893SHugh Dickins return error; 13561da177e4SLinus Torvalds } 13571da177e4SLinus Torvalds 13581da177e4SLinus Torvalds /* 13591da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 13601da177e4SLinus Torvalds */ 13611da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 13621da177e4SLinus Torvalds { 13631da177e4SLinus Torvalds struct shmem_inode_info *info; 13641da177e4SLinus Torvalds struct address_space *mapping; 13651da177e4SLinus Torvalds struct inode *inode; 13666922c0c7SHugh Dickins swp_entry_t swap; 13676922c0c7SHugh Dickins pgoff_t index; 13681da177e4SLinus Torvalds 1369800d8c63SKirill A. 
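/*
 * [Editor's aside -- simplified sketch, not kernel code] shmem_unuse()
 * above combines two patterns: a batched scan that resumes from the last
 * index seen (start = indices[pvec.nr - 1] in shmem_unuse_inode()), and
 * a per-object pin (info->stop_eviction) held while the list lock is
 * dropped, so the inode cannot be freed mid-walk. The pin idea in plain
 * C11, with hypothetical node/slow_work() names; the kernel uses
 * atomic_t plus wait_var_event() where this sketch simply decrements:
 */
#include <pthread.h>
#include <stdatomic.h>

struct node {
        struct node *next;
        atomic_int pinned;              /* like info->stop_eviction */
};

extern struct node *head;               /* hypothetical list, lock below */
extern pthread_mutex_t list_lock;
extern void slow_work(struct node *n);  /* must run without list_lock */

static void walk(void)
{
        struct node *n, *next;

        pthread_mutex_lock(&list_lock);
        for (n = head; n; n = next) {
                atomic_fetch_add(&n->pinned, 1);        /* freeing deferred */
                pthread_mutex_unlock(&list_lock);

                slow_work(n);           /* safe: n is pinned, lock dropped */

                pthread_mutex_lock(&list_lock);
                next = n->next;         /* still valid to read: n is pinned */
                atomic_fetch_sub(&n->pinned, 1);        /* freer may proceed */
        }
        pthread_mutex_unlock(&list_lock);
}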
Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 13701da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 13711da177e4SLinus Torvalds mapping = page->mapping; 13721da177e4SLinus Torvalds index = page->index; 13731da177e4SLinus Torvalds inode = mapping->host; 13741da177e4SLinus Torvalds info = SHMEM_I(inode); 13751da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 13761da177e4SLinus Torvalds goto redirty; 1377d9fe526aSHugh Dickins if (!total_swap_pages) 13781da177e4SLinus Torvalds goto redirty; 13791da177e4SLinus Torvalds 1380d9fe526aSHugh Dickins /* 138197b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 138297b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 138397b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 138497b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 138597b713baSChristoph Hellwig * threads or sync. 1386d9fe526aSHugh Dickins */ 138748f170fbSHugh Dickins if (!wbc->for_reclaim) { 138848f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 138948f170fbSHugh Dickins goto redirty; 139048f170fbSHugh Dickins } 13911635f6a7SHugh Dickins 13921635f6a7SHugh Dickins /* 13931635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13941635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 13951635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 13961aac1400SHugh Dickins * 13971aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 13981aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 13991aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 14001aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 14011aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 14021635f6a7SHugh Dickins */ 14031635f6a7SHugh Dickins if (!PageUptodate(page)) { 14041aac1400SHugh Dickins if (inode->i_private) { 14051aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 14061aac1400SHugh Dickins spin_lock(&inode->i_lock); 14071aac1400SHugh Dickins shmem_falloc = inode->i_private; 14081aac1400SHugh Dickins if (shmem_falloc && 14098e205f77SHugh Dickins !shmem_falloc->waitq && 14101aac1400SHugh Dickins index >= shmem_falloc->start && 14111aac1400SHugh Dickins index < shmem_falloc->next) 14121aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 14131aac1400SHugh Dickins else 14141aac1400SHugh Dickins shmem_falloc = NULL; 14151aac1400SHugh Dickins spin_unlock(&inode->i_lock); 14161aac1400SHugh Dickins if (shmem_falloc) 14171aac1400SHugh Dickins goto redirty; 14181aac1400SHugh Dickins } 14191635f6a7SHugh Dickins clear_highpage(page); 14201635f6a7SHugh Dickins flush_dcache_page(page); 14211635f6a7SHugh Dickins SetPageUptodate(page); 14221635f6a7SHugh Dickins } 14231635f6a7SHugh Dickins 142438d8b4e6SHuang Ying swap = get_swap_page(page); 142548f170fbSHugh Dickins if (!swap.val) 142648f170fbSHugh Dickins goto redirty; 1427d9fe526aSHugh Dickins 1428b1dea800SHugh Dickins /* 1429b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 14306922c0c7SHugh Dickins * if it's not already there. 
Do it now before the page is 14316922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1432b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 14336922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 14346922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1435b1dea800SHugh Dickins */ 1436b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 143705bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1438b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1439b1dea800SHugh Dickins 14404afab1cdSYang Shi if (add_to_swap_cache(page, swap, 14413852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 14423852f676SJoonsoo Kim NULL) == 0) { 14434595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1444267a4c76SHugh Dickins shmem_recalc_inode(inode); 1445267a4c76SHugh Dickins info->swapped++; 14464595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1447267a4c76SHugh Dickins 1448aaa46865SHugh Dickins swap_shmem_alloc(swap); 14496922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 14506922c0c7SHugh Dickins 14516922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1452d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 14539fab5619SHugh Dickins swap_writepage(page, wbc); 14541da177e4SLinus Torvalds return 0; 14551da177e4SLinus Torvalds } 14561da177e4SLinus Torvalds 14576922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 145875f6d6d2SMinchan Kim put_swap_page(page, swap); 14591da177e4SLinus Torvalds redirty: 14601da177e4SLinus Torvalds set_page_dirty(page); 1461d9fe526aSHugh Dickins if (wbc->for_reclaim) 1462d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1463d9fe526aSHugh Dickins unlock_page(page); 1464d9fe526aSHugh Dickins return 0; 14651da177e4SLinus Torvalds } 14661da177e4SLinus Torvalds 146775edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 146871fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1469680d794bSakpm@linux-foundation.org { 1470680d794bSakpm@linux-foundation.org char buffer[64]; 1471680d794bSakpm@linux-foundation.org 147271fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1473095f1fc4SLee Schermerhorn return; /* show nothing */ 1474095f1fc4SLee Schermerhorn 1475a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1476095f1fc4SLee Schermerhorn 1477095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1478680d794bSakpm@linux-foundation.org } 147971fe804bSLee Schermerhorn 148071fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 148171fe804bSLee Schermerhorn { 148271fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 148371fe804bSLee Schermerhorn if (sbinfo->mpol) { 148471fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 148571fe804bSLee Schermerhorn mpol = sbinfo->mpol; 148671fe804bSLee Schermerhorn mpol_get(mpol); 148771fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 148871fe804bSLee Schermerhorn } 148971fe804bSLee Schermerhorn return mpol; 149071fe804bSLee Schermerhorn } 149175edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 149275edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 149375edd345SHugh Dickins { 149475edd345SHugh Dickins } 149575edd345SHugh Dickins static inline struct mempolicy 
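/*
 * [Editor's aside -- illustrative sketch, not part of shmem.c] The
 * ",mpol=%s" printed by shmem_show_mpol() above round-trips the mount
 * option that installs the superblock-wide policy shmem_get_sbmpol()
 * hands out. Setting it from userspace (NUMA kernel and CAP_SYS_ADMIN
 * required; the mount point is made up for the demo):
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("tmpfs", "/mnt/ramdisk", "tmpfs", 0,
                  "size=256m,mpol=interleave"))
                perror("mount");
        /* "mpol=interleave" now appears for this instance in /proc/mounts */
        return 0;
}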
*shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 149675edd345SHugh Dickins { 149775edd345SHugh Dickins return NULL; 149875edd345SHugh Dickins } 149975edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 150075edd345SHugh Dickins #ifndef CONFIG_NUMA 150175edd345SHugh Dickins #define vm_policy vm_private_data 150275edd345SHugh Dickins #endif 1503680d794bSakpm@linux-foundation.org 1504800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1505800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1506800d8c63SKirill A. Shutemov { 1507800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 15082c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1509800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1510800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1511800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1512800d8c63SKirill A. Shutemov } 1513800d8c63SKirill A. Shutemov 1514800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1515800d8c63SKirill A. Shutemov { 1516800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1517800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1518800d8c63SKirill A. Shutemov } 1519800d8c63SKirill A. Shutemov 152041ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 152141ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 15221da177e4SLinus Torvalds { 15231da177e4SLinus Torvalds struct vm_area_struct pvma; 152418a2f371SMel Gorman struct page *page; 1525e9e9b7ecSMinchan Kim struct vm_fault vmf; 15261da177e4SLinus Torvalds 1527800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1528e9e9b7ecSMinchan Kim vmf.vma = &pvma; 1529e9e9b7ecSMinchan Kim vmf.address = 0; 1530e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1531800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 153218a2f371SMel Gorman 1533800d8c63SKirill A. Shutemov return page; 1534800d8c63SKirill A. Shutemov } 153518a2f371SMel Gorman 1536800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1537800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1538800d8c63SKirill A. Shutemov { 1539800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 15407b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 15417b8d046fSMatthew Wilcox pgoff_t hindex; 1542800d8c63SKirill A. Shutemov struct page *page; 1543800d8c63SKirill A. Shutemov 15444620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 15457b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 15467b8d046fSMatthew Wilcox XA_PRESENT)) 1547800d8c63SKirill A. Shutemov return NULL; 1548800d8c63SKirill A. Shutemov 1549800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1550800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 155119deb769SDavid Rientjes HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1552800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1553800d8c63SKirill A. Shutemov if (page) 1554800d8c63SKirill A. 
Shutemov prep_transhuge_page(page); 1555dcdf11eeSDavid Rientjes else 1556dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK); 155718a2f371SMel Gorman return page; 155818a2f371SMel Gorman } 155918a2f371SMel Gorman 156018a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 156118a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 156218a2f371SMel Gorman { 156318a2f371SMel Gorman struct vm_area_struct pvma; 156418a2f371SMel Gorman struct page *page; 156518a2f371SMel Gorman 1566800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1567800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1568800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 156918a2f371SMel Gorman 1570800d8c63SKirill A. Shutemov return page; 1571800d8c63SKirill A. Shutemov } 1572800d8c63SKirill A. Shutemov 1573800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 15740f079694SMike Rapoport struct inode *inode, 1575800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1576800d8c63SKirill A. Shutemov { 15770f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 1578800d8c63SKirill A. Shutemov struct page *page; 1579800d8c63SKirill A. Shutemov int nr; 1580800d8c63SKirill A. Shutemov int err = -ENOSPC; 1581800d8c63SKirill A. Shutemov 1582396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1583800d8c63SKirill A. Shutemov huge = false; 1584800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1585800d8c63SKirill A. Shutemov 15860f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1587800d8c63SKirill A. Shutemov goto failed; 1588800d8c63SKirill A. Shutemov 1589800d8c63SKirill A. Shutemov if (huge) 1590800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1591800d8c63SKirill A. Shutemov else 1592800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 159375edd345SHugh Dickins if (page) { 159475edd345SHugh Dickins __SetPageLocked(page); 159575edd345SHugh Dickins __SetPageSwapBacked(page); 1596800d8c63SKirill A. Shutemov return page; 159775edd345SHugh Dickins } 159818a2f371SMel Gorman 1599800d8c63SKirill A. Shutemov err = -ENOMEM; 16000f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1601800d8c63SKirill A. Shutemov failed: 1602800d8c63SKirill A. Shutemov return ERR_PTR(err); 16031da177e4SLinus Torvalds } 160471fe804bSLee Schermerhorn 16051da177e4SLinus Torvalds /* 1606bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1607bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1608bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1609bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1610bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1611bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1612bde05d1cSHugh Dickins * 1613bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1614bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1615bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
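/*
 * [Editor's aside -- illustrative sketch, not part of shmem.c]
 * shmem_alloc_hugepage() above only runs when the mount or the global
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled knob permits it.
 * Opting a single instance in via the "huge=" mount option (mount point
 * made up; "within_size" matches the SHMEM_HUGE_WITHIN_SIZE case of
 * shmem_getpage_gfp() further below):
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        if (mount("tmpfs", "/mnt/thp", "tmpfs", 0, "huge=within_size"))
                perror("mount");
        /*
         * Files here that reach HPAGE_PMD_SIZE (2M on x86_64) become
         * eligible for PMD-sized pages; shmem_getattr() above then
         * reports st_blksize as HPAGE_PMD_SIZE.
         */
        return 0;
}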
1616bde05d1cSHugh Dickins */ 1617bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1618bde05d1cSHugh Dickins { 1619bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1620bde05d1cSHugh Dickins } 1621bde05d1cSHugh Dickins 1622bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1623bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1624bde05d1cSHugh Dickins { 1625bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1626bde05d1cSHugh Dickins struct address_space *swap_mapping; 1627c1cb20d4SYu Zhao swp_entry_t entry; 1628bde05d1cSHugh Dickins pgoff_t swap_index; 1629bde05d1cSHugh Dickins int error; 1630bde05d1cSHugh Dickins 1631bde05d1cSHugh Dickins oldpage = *pagep; 1632c1cb20d4SYu Zhao entry.val = page_private(oldpage); 1633c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1634bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1635bde05d1cSHugh Dickins 1636bde05d1cSHugh Dickins /* 1637bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1638bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1639bde05d1cSHugh Dickins */ 1640bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1641bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1642bde05d1cSHugh Dickins if (!newpage) 1643bde05d1cSHugh Dickins return -ENOMEM; 1644bde05d1cSHugh Dickins 164509cbfeafSKirill A. Shutemov get_page(newpage); 1646bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 16470142ef6cSHugh Dickins flush_dcache_page(newpage); 1648bde05d1cSHugh Dickins 16499956edf3SHugh Dickins __SetPageLocked(newpage); 16509956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1651bde05d1cSHugh Dickins SetPageUptodate(newpage); 1652c1cb20d4SYu Zhao set_page_private(newpage, entry.val); 1653bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1654bde05d1cSHugh Dickins 1655bde05d1cSHugh Dickins /* 1656bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1657bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1658bde05d1cSHugh Dickins */ 1659b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 166062f945b6SMatthew Wilcox error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 16610142ef6cSHugh Dickins if (!error) { 16620d1c2072SJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 16630d1c2072SJohannes Weiner __inc_lruvec_page_state(newpage, NR_FILE_PAGES); 16640d1c2072SJohannes Weiner __dec_lruvec_page_state(oldpage, NR_FILE_PAGES); 16650142ef6cSHugh Dickins } 1666b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1667bde05d1cSHugh Dickins 16680142ef6cSHugh Dickins if (unlikely(error)) { 16690142ef6cSHugh Dickins /* 16700142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 16710142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 16720142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 16730142ef6cSHugh Dickins */ 16740142ef6cSHugh Dickins oldpage = newpage; 16750142ef6cSHugh Dickins } else { 16766058eaecSJohannes Weiner lru_cache_add(newpage); 16770142ef6cSHugh Dickins *pagep = newpage; 16780142ef6cSHugh Dickins } 1679bde05d1cSHugh Dickins 1680bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1681bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1682bde05d1cSHugh Dickins 1683bde05d1cSHugh Dickins unlock_page(oldpage); 168409cbfeafSKirill A. 
Shutemov put_page(oldpage); 168509cbfeafSKirill A. Shutemov put_page(oldpage); 16860142ef6cSHugh Dickins return error; 1687bde05d1cSHugh Dickins } 1688bde05d1cSHugh Dickins 1689bde05d1cSHugh Dickins /* 1690c5bf121eSVineeth Remanan Pillai * Swap in the page pointed to by *pagep. 1691c5bf121eSVineeth Remanan Pillai * Caller has to make sure that *pagep contains a valid swapped page. 1692c5bf121eSVineeth Remanan Pillai * Returns 0 and the page in pagep if success. On failure, returns the 1693af44c12fSRandy Dunlap * error code and NULL in *pagep. 16941da177e4SLinus Torvalds */ 1695c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index, 1696c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, 1697c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 16982b740303SSouptick Joarder vm_fault_t *fault_type) 16991da177e4SLinus Torvalds { 17001da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 170123f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 1702c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm; 170327ab7006SHugh Dickins struct page *page; 17041da177e4SLinus Torvalds swp_entry_t swap; 17051da177e4SLinus Torvalds int error; 17061da177e4SLinus Torvalds 1707c5bf121eSVineeth Remanan Pillai VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); 1708c5bf121eSVineeth Remanan Pillai swap = radix_to_swp_entry(*pagep); 1709c5bf121eSVineeth Remanan Pillai *pagep = NULL; 171054af6042SHugh Dickins 17111da177e4SLinus Torvalds /* Look it up and read it in.. */ 1712ec560175SHuang Ying page = lookup_swap_cache(swap, NULL, 0); 171327ab7006SHugh Dickins if (!page) { 17149e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 17159e18eb29SAndres Lagar-Cavilla if (fault_type) { 171668da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 17179e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 17182262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 17199e18eb29SAndres Lagar-Cavilla } 17209e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 172141ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 172227ab7006SHugh Dickins if (!page) { 17231da177e4SLinus Torvalds error = -ENOMEM; 172454af6042SHugh Dickins goto failed; 1725285b2c4fSHugh Dickins } 17261da177e4SLinus Torvalds } 17271da177e4SLinus Torvalds 17281da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 172954af6042SHugh Dickins lock_page(page); 17300142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1731d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1732c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1733d1899228SHugh Dickins goto unlock; 1734bde05d1cSHugh Dickins } 173527ab7006SHugh Dickins if (!PageUptodate(page)) { 17361da177e4SLinus Torvalds error = -EIO; 173754af6042SHugh Dickins goto failed; 173854af6042SHugh Dickins } 173954af6042SHugh Dickins wait_on_page_writeback(page); 174054af6042SHugh Dickins 17418a84802eSSteven Price /* 17428a84802eSSteven Price * Some architectures may have to restore extra metadata to the 17438a84802eSSteven Price * physical page after reading from swap. 
 */
        arch_swap_restore(swap, page);

        if (shmem_should_replace_page(page, gfp)) {
                error = shmem_replace_page(&page, gfp, info, index);
                if (error)
                        goto failed;
        }

        error = shmem_add_to_page_cache(page, mapping, index,
                                        swp_to_radix_entry(swap), gfp,
                                        charge_mm);
        if (error)
                goto failed;

        spin_lock_irq(&info->lock);
        info->swapped--;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);

        if (sgp == SGP_WRITE)
                mark_page_accessed(page);

        delete_from_swap_cache(page);
        set_page_dirty(page);
        swap_free(swap);

        *pagep = page;
        return 0;
failed:
        if (!shmem_confirm_swap(mapping, index, swap))
                error = -EEXIST;
unlock:
        if (page) {
                unlock_page(page);
                put_page(page);
        }

        return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 *
 * vmf and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
1794c5bf121eSVineeth Remanan Pillai */ 1795c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1796c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1797c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1798c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1799c5bf121eSVineeth Remanan Pillai { 1800c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1801c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1802c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1803c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1804c5bf121eSVineeth Remanan Pillai struct page *page; 1805c5bf121eSVineeth Remanan Pillai enum sgp_type sgp_huge = sgp; 1806c5bf121eSVineeth Remanan Pillai pgoff_t hindex = index; 1807c5bf121eSVineeth Remanan Pillai int error; 1808c5bf121eSVineeth Remanan Pillai int once = 0; 1809c5bf121eSVineeth Remanan Pillai int alloced = 0; 1810c5bf121eSVineeth Remanan Pillai 1811c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1812c5bf121eSVineeth Remanan Pillai return -EFBIG; 1813c5bf121eSVineeth Remanan Pillai if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1814c5bf121eSVineeth Remanan Pillai sgp = SGP_CACHE; 1815c5bf121eSVineeth Remanan Pillai repeat: 1816c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1817c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1818c5bf121eSVineeth Remanan Pillai return -EINVAL; 1819c5bf121eSVineeth Remanan Pillai } 1820c5bf121eSVineeth Remanan Pillai 1821c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 1822c5bf121eSVineeth Remanan Pillai charge_mm = vma ? vma->vm_mm : current->mm; 1823c5bf121eSVineeth Remanan Pillai 1824c5bf121eSVineeth Remanan Pillai page = find_lock_entry(mapping, index); 1825c5bf121eSVineeth Remanan Pillai if (xa_is_value(page)) { 1826c5bf121eSVineeth Remanan Pillai error = shmem_swapin_page(inode, index, &page, 1827c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1828c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1829c5bf121eSVineeth Remanan Pillai goto repeat; 1830c5bf121eSVineeth Remanan Pillai 1831c5bf121eSVineeth Remanan Pillai *pagep = page; 1832c5bf121eSVineeth Remanan Pillai return error; 1833c5bf121eSVineeth Remanan Pillai } 1834c5bf121eSVineeth Remanan Pillai 183563ec1973SMatthew Wilcox (Oracle) if (page) 183663ec1973SMatthew Wilcox (Oracle) hindex = page->index; 1837c5bf121eSVineeth Remanan Pillai if (page && sgp == SGP_WRITE) 1838c5bf121eSVineeth Remanan Pillai mark_page_accessed(page); 1839c5bf121eSVineeth Remanan Pillai 1840c5bf121eSVineeth Remanan Pillai /* fallocated page? */ 1841c5bf121eSVineeth Remanan Pillai if (page && !PageUptodate(page)) { 1842c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 1843c5bf121eSVineeth Remanan Pillai goto clear; 1844c5bf121eSVineeth Remanan Pillai unlock_page(page); 1845c5bf121eSVineeth Remanan Pillai put_page(page); 1846c5bf121eSVineeth Remanan Pillai page = NULL; 184763ec1973SMatthew Wilcox (Oracle) hindex = index; 1848c5bf121eSVineeth Remanan Pillai } 184963ec1973SMatthew Wilcox (Oracle) if (page || sgp == SGP_READ) 185063ec1973SMatthew Wilcox (Oracle) goto out; 1851c5bf121eSVineeth Remanan Pillai 1852c5bf121eSVineeth Remanan Pillai /* 1853c5bf121eSVineeth Remanan Pillai * Fast cache lookup did not find it: 1854c5bf121eSVineeth Remanan Pillai * bring it back from swap or allocate. 
1855c5bf121eSVineeth Remanan Pillai */ 1856c5bf121eSVineeth Remanan Pillai 1857cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1858cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1859cfda0526SMike Rapoport return 0; 1860cfda0526SMike Rapoport } 1861cfda0526SMike Rapoport 1862800d8c63SKirill A. Shutemov /* shmem_symlink() */ 186330e6a51dSHui Su if (!shmem_mapping(mapping)) 1864800d8c63SKirill A. Shutemov goto alloc_nohuge; 1865657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1866800d8c63SKirill A. Shutemov goto alloc_nohuge; 1867800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1868800d8c63SKirill A. Shutemov goto alloc_huge; 1869800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1870800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1871800d8c63SKirill A. Shutemov goto alloc_nohuge; 187227d80fa2SKees Cook case SHMEM_HUGE_WITHIN_SIZE: { 187327d80fa2SKees Cook loff_t i_size; 187427d80fa2SKees Cook pgoff_t off; 187527d80fa2SKees Cook 1876800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1877800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1878800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1879800d8c63SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 1880800d8c63SKirill A. Shutemov goto alloc_huge; 188127d80fa2SKees Cook 188227d80fa2SKees Cook fallthrough; 188327d80fa2SKees Cook } 1884800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1885657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1886657e3038SKirill A. Shutemov goto alloc_huge; 1887657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1888800d8c63SKirill A. Shutemov goto alloc_nohuge; 188959a16eadSHugh Dickins } 18901da177e4SLinus Torvalds 1891800d8c63SKirill A. Shutemov alloc_huge: 18920f079694SMike Rapoport page = shmem_alloc_and_acct_page(gfp, inode, index, true); 1893800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1894c5bf121eSVineeth Remanan Pillai alloc_nohuge: 1895c5bf121eSVineeth Remanan Pillai page = shmem_alloc_and_acct_page(gfp, inode, 1896800d8c63SKirill A. Shutemov index, false); 189754af6042SHugh Dickins } 1898800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1899779750d2SKirill A. Shutemov int retry = 5; 1900c5bf121eSVineeth Remanan Pillai 1901800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1902800d8c63SKirill A. Shutemov page = NULL; 1903779750d2SKirill A. Shutemov if (error != -ENOSPC) 1904c5bf121eSVineeth Remanan Pillai goto unlock; 1905779750d2SKirill A. Shutemov /* 1906c5bf121eSVineeth Remanan Pillai * Try to reclaim some space by splitting a huge page 1907779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1908779750d2SKirill A. Shutemov */ 1909779750d2SKirill A. Shutemov while (retry--) { 1910779750d2SKirill A. Shutemov int ret; 1911c5bf121eSVineeth Remanan Pillai 1912779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1913779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1914779750d2SKirill A. Shutemov break; 1915779750d2SKirill A. Shutemov if (ret) 1916779750d2SKirill A. Shutemov goto alloc_nohuge; 1917779750d2SKirill A. Shutemov } 1918c5bf121eSVineeth Remanan Pillai goto unlock; 1919800d8c63SKirill A. Shutemov } 1920800d8c63SKirill A. Shutemov 1921800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1922800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1923800d8c63SKirill A. Shutemov else 1924800d8c63SKirill A. Shutemov hindex = index; 1925800d8c63SKirill A. 
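/*
 * [Editor's aside -- the SHMEM_HUGE_WITHIN_SIZE arithmetic above, redone
 * as a standalone sketch with x86_64 constants.] A huge page is worth
 * allocating only when i_size covers the end of the aligned 2M extent
 * that would hold 'index':
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define HPAGE_PMD_SIZE  (1UL << 21)                     /* 2M */
#define HPAGE_PMD_NR    (HPAGE_PMD_SIZE / PAGE_SIZE)    /* 512 small pages */

static unsigned long round_up_to(unsigned long x, unsigned long to)
{
        return (x + to - 1) / to * to;
}

/* Mirrors the SHMEM_HUGE_WITHIN_SIZE case of shmem_getpage_gfp() */
static bool huge_within_size(unsigned long index, unsigned long i_size)
{
        unsigned long off = round_up_to(index, HPAGE_PMD_NR);

        i_size = round_up_to(i_size, PAGE_SIZE);
        return i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= off;
}

int main(void)
{
        printf("%d\n", huge_within_size(0, 2UL << 20));   /* 1: covers 2M */
        printf("%d\n", huge_within_size(0, 1UL << 20));   /* 0: only 1M */
        printf("%d\n", huge_within_size(600, 4UL << 20)); /* 1: 2nd extent */
        return 0;
}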
Shutemov 192666d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1927eb39d618SHugh Dickins __SetPageReferenced(page); 192866d2f4d2SHugh Dickins 1929800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 19303fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK, 19313fea5a49SJohannes Weiner charge_mm); 19323fea5a49SJohannes Weiner if (error) 1933800d8c63SKirill A. Shutemov goto unacct; 19346058eaecSJohannes Weiner lru_cache_add(page); 193554af6042SHugh Dickins 19364595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1937d8c6546bSMatthew Wilcox (Oracle) info->alloced += compound_nr(page); 1938800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 193954af6042SHugh Dickins shmem_recalc_inode(inode); 19404595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 19411635f6a7SHugh Dickins alloced = true; 194254af6042SHugh Dickins 1943779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1944779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1945779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1946779750d2SKirill A. Shutemov /* 1947779750d2SKirill A. Shutemov * Part of the huge page is beyond i_size: subject 1948779750d2SKirill A. Shutemov * to shrink under memory pressure. 1949779750d2SKirill A. Shutemov */ 1950779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1951d041353dSCong Wang /* 1952d041353dSCong Wang * _careful to defend against unlocked access to 1953d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1954d041353dSCong Wang */ 1955d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1956779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1957779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1958779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1959779750d2SKirill A. Shutemov } 1960779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1961779750d2SKirill A. Shutemov } 1962779750d2SKirill A. Shutemov 1963ec9516fbSHugh Dickins /* 19641635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 19651635f6a7SHugh Dickins */ 19661635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 19671635f6a7SHugh Dickins sgp = SGP_WRITE; 19681635f6a7SHugh Dickins clear: 19691635f6a7SHugh Dickins /* 19701635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 19711635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 19721635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1973ec9516fbSHugh Dickins */ 1974800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1975800d8c63SKirill A. Shutemov int i; 1976800d8c63SKirill A. Shutemov 197763ec1973SMatthew Wilcox (Oracle) for (i = 0; i < compound_nr(page); i++) { 197863ec1973SMatthew Wilcox (Oracle) clear_highpage(page + i); 197963ec1973SMatthew Wilcox (Oracle) flush_dcache_page(page + i); 1980800d8c63SKirill A. Shutemov } 198163ec1973SMatthew Wilcox (Oracle) SetPageUptodate(page); 1982ec9516fbSHugh Dickins } 1983bde05d1cSHugh Dickins 198454af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 198575edd345SHugh Dickins if (sgp <= SGP_CACHE && 198609cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1987267a4c76SHugh Dickins if (alloced) { 1988267a4c76SHugh Dickins ClearPageDirty(page); 1989267a4c76SHugh Dickins delete_from_page_cache(page); 19904595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 1991267a4c76SHugh Dickins shmem_recalc_inode(inode); 19924595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1993267a4c76SHugh Dickins } 199454af6042SHugh Dickins error = -EINVAL; 1995267a4c76SHugh Dickins goto unlock; 1996ff36b801SShaohua Li } 199763ec1973SMatthew Wilcox (Oracle) out: 1998800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 199954af6042SHugh Dickins return 0; 2000d00806b1SNick Piggin 2001d0217ac0SNick Piggin /* 200254af6042SHugh Dickins * Error recovery. 20031da177e4SLinus Torvalds */ 200454af6042SHugh Dickins unacct: 2005d8c6546bSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, compound_nr(page)); 2006800d8c63SKirill A. Shutemov 2007800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 2008800d8c63SKirill A. Shutemov unlock_page(page); 2009800d8c63SKirill A. Shutemov put_page(page); 2010800d8c63SKirill A. Shutemov goto alloc_nohuge; 2011800d8c63SKirill A. Shutemov } 2012d1899228SHugh Dickins unlock: 201327ab7006SHugh Dickins if (page) { 201454af6042SHugh Dickins unlock_page(page); 201509cbfeafSKirill A. Shutemov put_page(page); 201654af6042SHugh Dickins } 201754af6042SHugh Dickins if (error == -ENOSPC && !once++) { 20184595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 201954af6042SHugh Dickins shmem_recalc_inode(inode); 20204595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 20211da177e4SLinus Torvalds goto repeat; 2022d8dc74f2SAdrian Bunk } 20237f4446eeSMatthew Wilcox if (error == -EEXIST) 202454af6042SHugh Dickins goto repeat; 202554af6042SHugh Dickins return error; 20261da177e4SLinus Torvalds } 20271da177e4SLinus Torvalds 202810d20bd2SLinus Torvalds /* 202910d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 203010d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 203110d20bd2SLinus Torvalds * target. 203210d20bd2SLinus Torvalds */ 2033ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 203410d20bd2SLinus Torvalds { 203510d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 20362055da97SIngo Molnar list_del_init(&wait->entry); 203710d20bd2SLinus Torvalds return ret; 203810d20bd2SLinus Torvalds } 203910d20bd2SLinus Torvalds 204020acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 20411da177e4SLinus Torvalds { 204211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 2043496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 20449e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2045657e3038SKirill A. Shutemov enum sgp_type sgp; 204620acce67SSouptick Joarder int err; 204720acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 20481da177e4SLinus Torvalds 2049f00cdc6dSHugh Dickins /* 2050f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2051f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 2052f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 20538e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 20548e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 20558e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 20568e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 
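/*
 * [Editor's aside -- illustrative reproducer sketch, not part of
 * shmem.c] The race described above is easy to provoke from userspace:
 * one thread repeatedly punches the same range while another keeps
 * faulting it back in; the shmem_falloc waitq below is what stops the
 * faulting side from starving the punch. File name is made up; build
 * with -pthread and run against tmpfs.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (1 << 20)

static volatile char *map;
static int fd;

static void *puncher(void *arg)
{
        for (int i = 0; i < 10000; i++)
                fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          0, LEN);
        return NULL;
}

int main(void)
{
        pthread_t t;

        fd = open("/dev/shm/race-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, LEN))
                return 1;
        map = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        pthread_create(&t, NULL, puncher, NULL);
        for (int i = 0; i < 10000; i++)                 /* faulting side */
                for (size_t off = 0; off < LEN; off += 4096)
                        map[off] = 1;
        pthread_join(t, NULL);
        return unlink("/dev/shm/race-demo");
}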
20578e205f77SHugh Dickins * 20588e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 20598e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 20608e205f77SHugh Dickins * we just need to make racing faults a rare case. 20618e205f77SHugh Dickins * 20628e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 20638e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 20648e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 2065f00cdc6dSHugh Dickins */ 2066f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2067f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2068f00cdc6dSHugh Dickins 2069f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2070f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 20718e205f77SHugh Dickins if (shmem_falloc && 20728e205f77SHugh Dickins shmem_falloc->waitq && 20738e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 20748e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 20758897c1b1SKirill A. Shutemov struct file *fpin; 20768e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 207710d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 20788e205f77SHugh Dickins 20798e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 20808897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL); 20818897c1b1SKirill A. Shutemov if (fpin) 20828e205f77SHugh Dickins ret = VM_FAULT_RETRY; 20838e205f77SHugh Dickins 20848e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 20858e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 20868e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 20878e205f77SHugh Dickins spin_unlock(&inode->i_lock); 20888e205f77SHugh Dickins schedule(); 20898e205f77SHugh Dickins 20908e205f77SHugh Dickins /* 20918e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 20928e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 20938e205f77SHugh Dickins * is usually invalid by the time we reach here, but 20948e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 20958e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 20968e205f77SHugh Dickins */ 20978e205f77SHugh Dickins spin_lock(&inode->i_lock); 20988e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 20998e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21008897c1b1SKirill A. Shutemov 21018897c1b1SKirill A. Shutemov if (fpin) 21028897c1b1SKirill A. Shutemov fput(fpin); 21038e205f77SHugh Dickins return ret; 2104f00cdc6dSHugh Dickins } 21058e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2106f00cdc6dSHugh Dickins } 2107f00cdc6dSHugh Dickins 2108657e3038SKirill A. Shutemov sgp = SGP_CACHE; 210918600332SMichal Hocko 211018600332SMichal Hocko if ((vma->vm_flags & VM_NOHUGEPAGE) || 211118600332SMichal Hocko test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 2112657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 211318600332SMichal Hocko else if (vma->vm_flags & VM_HUGEPAGE) 211418600332SMichal Hocko sgp = SGP_HUGE; 2115657e3038SKirill A. 
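/*
 * [Editor's aside -- illustrative userspace sketch, not part of shmem.c]
 * The VM_HUGEPAGE/VM_NOHUGEPAGE flags consulted above are set per-VMA
 * with madvise(); together with the "huge=advise" mount mode this is the
 * opt-in path for SGP_HUGE. A shared anonymous mapping is shmem-backed,
 * so it exercises exactly this code:
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4UL << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        if (madvise(p, len, MADV_HUGEPAGE))     /* sets VM_HUGEPAGE */
                perror("madvise");
        p[0] = 1;       /* this fault may now allocate a PMD-sized page */
        return 0;
}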
Shutemov 211620acce67SSouptick Joarder err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 2117cfda0526SMike Rapoport gfp, vma, vmf, &ret); 211820acce67SSouptick Joarder if (err) 211920acce67SSouptick Joarder return vmf_error(err); 212068da9f05SHugh Dickins return ret; 21211da177e4SLinus Torvalds } 21221da177e4SLinus Torvalds 2123c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2124c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2125c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2126c01d5b30SHugh Dickins { 2127c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2128c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2129c01d5b30SHugh Dickins unsigned long addr; 2130c01d5b30SHugh Dickins unsigned long offset; 2131c01d5b30SHugh Dickins unsigned long inflated_len; 2132c01d5b30SHugh Dickins unsigned long inflated_addr; 2133c01d5b30SHugh Dickins unsigned long inflated_offset; 2134c01d5b30SHugh Dickins 2135c01d5b30SHugh Dickins if (len > TASK_SIZE) 2136c01d5b30SHugh Dickins return -ENOMEM; 2137c01d5b30SHugh Dickins 2138c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2139c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2140c01d5b30SHugh Dickins 2141396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2142c01d5b30SHugh Dickins return addr; 2143c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2144c01d5b30SHugh Dickins return addr; 2145c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2146c01d5b30SHugh Dickins return addr; 2147c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2148c01d5b30SHugh Dickins return addr; 2149c01d5b30SHugh Dickins 2150c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2151c01d5b30SHugh Dickins return addr; 2152c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2153c01d5b30SHugh Dickins return addr; 2154c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2155c01d5b30SHugh Dickins return addr; 2156c01d5b30SHugh Dickins /* 2157c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2158c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 215999158997SKirill A. Shutemov * But if caller specified an address hint and we allocated area there 216099158997SKirill A. Shutemov * successfully, respect that as before. 2161c01d5b30SHugh Dickins */ 216299158997SKirill A. Shutemov if (uaddr == addr) 2163c01d5b30SHugh Dickins return addr; 2164c01d5b30SHugh Dickins 2165c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2166c01d5b30SHugh Dickins struct super_block *sb; 2167c01d5b30SHugh Dickins 2168c01d5b30SHugh Dickins if (file) { 2169c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2170c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2171c01d5b30SHugh Dickins } else { 2172c01d5b30SHugh Dickins /* 2173c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2174c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
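/*
 * [Editor's aside -- illustrative userspace sketch, not part of shmem.c]
 * The "/dev/zero" case mentioned above: a MAP_SHARED mapping of
 * /dev/zero is backed by a shmem object, so it behaves exactly like
 * MAP_ANONYMOUS | MAP_SHARED -- parent and child share the pages:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/zero", O_RDWR);
        int *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);

        if (fd < 0 || shared == MAP_FAILED)
                return 1;
        if (fork() == 0) {
                *shared = 42;                   /* child writes ... */
                _exit(0);
        }
        wait(NULL);
        printf("parent sees %d\n", *shared);    /* ... parent reads 42 */
        return 0;
}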
2175c01d5b30SHugh Dickins */ 2176c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2177c01d5b30SHugh Dickins return addr; 2178c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2179c01d5b30SHugh Dickins } 21803089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2181c01d5b30SHugh Dickins return addr; 2182c01d5b30SHugh Dickins } 2183c01d5b30SHugh Dickins 2184c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2185c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2186c01d5b30SHugh Dickins return addr; 2187c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2188c01d5b30SHugh Dickins return addr; 2189c01d5b30SHugh Dickins 2190c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2191c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2192c01d5b30SHugh Dickins return addr; 2193c01d5b30SHugh Dickins if (inflated_len < len) 2194c01d5b30SHugh Dickins return addr; 2195c01d5b30SHugh Dickins 219699158997SKirill A. Shutemov inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2197c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2198c01d5b30SHugh Dickins return addr; 2199c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2200c01d5b30SHugh Dickins return addr; 2201c01d5b30SHugh Dickins 2202c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2203c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2204c01d5b30SHugh Dickins if (inflated_offset > offset) 2205c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2206c01d5b30SHugh Dickins 2207c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2208c01d5b30SHugh Dickins return addr; 2209c01d5b30SHugh Dickins return inflated_addr; 2210c01d5b30SHugh Dickins } 2211c01d5b30SHugh Dickins 22121da177e4SLinus Torvalds #ifdef CONFIG_NUMA 221341ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 22141da177e4SLinus Torvalds { 2215496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 221641ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 22171da177e4SLinus Torvalds } 22181da177e4SLinus Torvalds 2219d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2220d8dc74f2SAdrian Bunk unsigned long addr) 22211da177e4SLinus Torvalds { 2222496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 222341ffe5d5SHugh Dickins pgoff_t index; 22241da177e4SLinus Torvalds 222541ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 222641ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 22271da177e4SLinus Torvalds } 22281da177e4SLinus Torvalds #endif 22291da177e4SLinus Torvalds 22301da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 22311da177e4SLinus Torvalds { 2232496ad9aaSAl Viro struct inode *inode = file_inode(file); 22331da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 22341da177e4SLinus Torvalds int retval = -ENOMEM; 22351da177e4SLinus Torvalds 2236ea0dfeb4SHugh Dickins /* 2237ea0dfeb4SHugh Dickins * What serializes the accesses to info->flags? 2238ea0dfeb4SHugh Dickins * ipc_lock_object() when called from shmctl_do_lock(), 2239ea0dfeb4SHugh Dickins * no serialization needed when called from shm_destroy(). 
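 *
 * For example (hypothetical userspace sketch), the sysvipc path
 * named above reaches this function as:
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	- lock == 1, sets VM_LOCKED
 *	shmctl(id, SHM_UNLOCK, NULL);	- lock == 0, clears it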
2240ea0dfeb4SHugh Dickins */ 22411da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 22421da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 22431da177e4SLinus Torvalds goto out_nomem; 22441da177e4SLinus Torvalds info->flags |= VM_LOCKED; 224589e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 22461da177e4SLinus Torvalds } 22471da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 22481da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 22491da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 225089e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds retval = 0; 225389e004eaSLee Schermerhorn 22541da177e4SLinus Torvalds out_nomem: 22551da177e4SLinus Torvalds return retval; 22561da177e4SLinus Torvalds } 22571da177e4SLinus Torvalds 22589b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 22591da177e4SLinus Torvalds { 2260ab3948f5SJoel Fernandes (Google) struct shmem_inode_info *info = SHMEM_I(file_inode(file)); 2261ab3948f5SJoel Fernandes (Google) 2262ab3948f5SJoel Fernandes (Google) if (info->seals & F_SEAL_FUTURE_WRITE) { 2263ab3948f5SJoel Fernandes (Google) /* 2264ab3948f5SJoel Fernandes (Google) * New PROT_WRITE and MAP_SHARED mmaps are not allowed when 2265ab3948f5SJoel Fernandes (Google) * "future write" seal active. 2266ab3948f5SJoel Fernandes (Google) */ 2267ab3948f5SJoel Fernandes (Google) if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) 2268ab3948f5SJoel Fernandes (Google) return -EPERM; 2269ab3948f5SJoel Fernandes (Google) 2270ab3948f5SJoel Fernandes (Google) /* 227105d35110SNicolas Geoffray * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as 227205d35110SNicolas Geoffray * MAP_SHARED and read-only, take care to not allow mprotect to 227305d35110SNicolas Geoffray * revert protections on such mappings. Do this only for shared 227405d35110SNicolas Geoffray * mappings. For private mappings, don't need to mask 227505d35110SNicolas Geoffray * VM_MAYWRITE as we still want them to be COW-writable. 2276ab3948f5SJoel Fernandes (Google) */ 227705d35110SNicolas Geoffray if (vma->vm_flags & VM_SHARED) 2278ab3948f5SJoel Fernandes (Google) vma->vm_flags &= ~(VM_MAYWRITE); 2279ab3948f5SJoel Fernandes (Google) } 2280ab3948f5SJoel Fernandes (Google) 228151b0bff2SCatalin Marinas /* arm64 - allow memory tagging on RAM-based files */ 228251b0bff2SCatalin Marinas vma->vm_flags |= VM_MTE_ALLOWED; 228351b0bff2SCatalin Marinas 22841da177e4SLinus Torvalds file_accessed(file); 22851da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2286396bcc52SMatthew Wilcox (Oracle) if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 2287f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2288f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2289f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2290f3f0e1d2SKirill A. 
Shutemov } 22911da177e4SLinus Torvalds return 0; 22921da177e4SLinus Torvalds } 22931da177e4SLinus Torvalds 2294454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 229509208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 22961da177e4SLinus Torvalds { 22971da177e4SLinus Torvalds struct inode *inode; 22981da177e4SLinus Torvalds struct shmem_inode_info *info; 22991da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2300e809d5f0SChris Down ino_t ino; 23011da177e4SLinus Torvalds 2302e809d5f0SChris Down if (shmem_reserve_inode(sb, &ino)) 23031da177e4SLinus Torvalds return NULL; 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds inode = new_inode(sb); 23061da177e4SLinus Torvalds if (inode) { 2307e809d5f0SChris Down inode->i_ino = ino; 230821cb47beSChristian Brauner inode_init_owner(&init_user_ns, inode, dir, mode); 23091da177e4SLinus Torvalds inode->i_blocks = 0; 2310078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 231146c9a946SArnd Bergmann inode->i_generation = prandom_u32(); 23121da177e4SLinus Torvalds info = SHMEM_I(inode); 23131da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 23141da177e4SLinus Torvalds spin_lock_init(&info->lock); 2315af53d3e9SHugh Dickins atomic_set(&info->stop_eviction, 0); 231640e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 23170b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2318779750d2SKirill A. Shutemov INIT_LIST_HEAD(&info->shrinklist); 23191da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 232038f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 232172c04902SAl Viro cache_no_acl(inode); 23221da177e4SLinus Torvalds 23231da177e4SLinus Torvalds switch (mode & S_IFMT) { 23241da177e4SLinus Torvalds default: 232539f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 23261da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 23271da177e4SLinus Torvalds break; 23281da177e4SLinus Torvalds case S_IFREG: 232914fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 23301da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 23311da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 233271fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 233371fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 23341da177e4SLinus Torvalds break; 23351da177e4SLinus Torvalds case S_IFDIR: 2336d8c76e6fSDave Hansen inc_nlink(inode); 23371da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 23381da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 23391da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 23401da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 23411da177e4SLinus Torvalds break; 23421da177e4SLinus Torvalds case S_IFLNK: 23431da177e4SLinus Torvalds /* 23441da177e4SLinus Torvalds * Must not load anything in the rbtree, 23451da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 
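 * (That is: symlinks never fault pages in through the shared-policy
 * lookup, and inode teardown frees the policy rbtree only for
 * S_IFREG inodes; so the policy is initialized with a NULL mpol
 * here and the rbtree must stay empty.)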
23461da177e4SLinus Torvalds */ 234771fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 23481da177e4SLinus Torvalds break; 23491da177e4SLinus Torvalds } 2350b45d71fbSJoel Fernandes (Google) 2351b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode); 23525b04c689SPavel Emelyanov } else 23535b04c689SPavel Emelyanov shmem_free_inode(sb); 23541da177e4SLinus Torvalds return inode; 23551da177e4SLinus Torvalds } 23561da177e4SLinus Torvalds 23578d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 23584c27fe4cSMike Rapoport pmd_t *dst_pmd, 23594c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 23604c27fe4cSMike Rapoport unsigned long dst_addr, 23614c27fe4cSMike Rapoport unsigned long src_addr, 23628d103963SMike Rapoport bool zeropage, 23634c27fe4cSMike Rapoport struct page **pagep) 23644c27fe4cSMike Rapoport { 23654c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 23664c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 23674c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 23684c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 23694c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 23704c27fe4cSMike Rapoport spinlock_t *ptl; 23714c27fe4cSMike Rapoport void *page_kaddr; 23724c27fe4cSMike Rapoport struct page *page; 23734c27fe4cSMike Rapoport pte_t _dst_pte, *dst_pte; 23744c27fe4cSMike Rapoport int ret; 2375e2a50c1fSAndrea Arcangeli pgoff_t offset, max_off; 23764c27fe4cSMike Rapoport 23774c27fe4cSMike Rapoport ret = -ENOMEM; 23780f079694SMike Rapoport if (!shmem_inode_acct_block(inode, 1)) 23794c27fe4cSMike Rapoport goto out; 23804c27fe4cSMike Rapoport 2381cb658a45SAndrea Arcangeli if (!*pagep) { 23824c27fe4cSMike Rapoport page = shmem_alloc_page(gfp, info, pgoff); 23834c27fe4cSMike Rapoport if (!page) 23840f079694SMike Rapoport goto out_unacct_blocks; 23854c27fe4cSMike Rapoport 23868d103963SMike Rapoport if (!zeropage) { /* mcopy_atomic */ 23874c27fe4cSMike Rapoport page_kaddr = kmap_atomic(page); 23888d103963SMike Rapoport ret = copy_from_user(page_kaddr, 23898d103963SMike Rapoport (const void __user *)src_addr, 23904c27fe4cSMike Rapoport PAGE_SIZE); 23914c27fe4cSMike Rapoport kunmap_atomic(page_kaddr); 23924c27fe4cSMike Rapoport 2393c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */ 23944c27fe4cSMike Rapoport if (unlikely(ret)) { 23954c27fe4cSMike Rapoport *pagep = page; 23960f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 23974c27fe4cSMike Rapoport /* don't free the page */ 23989e368259SAndrea Arcangeli return -ENOENT; 23994c27fe4cSMike Rapoport } 24008d103963SMike Rapoport } else { /* mfill_zeropage_atomic */ 24018d103963SMike Rapoport clear_highpage(page); 24028d103963SMike Rapoport } 24034c27fe4cSMike Rapoport } else { 24044c27fe4cSMike Rapoport page = *pagep; 24054c27fe4cSMike Rapoport *pagep = NULL; 24064c27fe4cSMike Rapoport } 24074c27fe4cSMike Rapoport 24089cc90c66SAndrea Arcangeli VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 24099cc90c66SAndrea Arcangeli __SetPageLocked(page); 24109cc90c66SAndrea Arcangeli __SetPageSwapBacked(page); 2411a425d358SAndrea Arcangeli __SetPageUptodate(page); 24129cc90c66SAndrea Arcangeli 2413e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2414e2a50c1fSAndrea Arcangeli offset = linear_page_index(dst_vma, dst_addr); 2415e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2416e2a50c1fSAndrea Arcangeli if (unlikely(offset >= 
max_off)) 2417e2a50c1fSAndrea Arcangeli goto out_release; 2418e2a50c1fSAndrea Arcangeli 24193fea5a49SJohannes Weiner ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, 24203fea5a49SJohannes Weiner gfp & GFP_RECLAIM_MASK, dst_mm); 24214c27fe4cSMike Rapoport if (ret) 24224c27fe4cSMike Rapoport goto out_release; 24234c27fe4cSMike Rapoport 24244c27fe4cSMike Rapoport _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 24254c27fe4cSMike Rapoport if (dst_vma->vm_flags & VM_WRITE) 24264c27fe4cSMike Rapoport _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 2427dcf7fe9dSAndrea Arcangeli else { 2428dcf7fe9dSAndrea Arcangeli /* 2429dcf7fe9dSAndrea Arcangeli * We don't set the pte dirty if the vma has no 2430dcf7fe9dSAndrea Arcangeli * VM_WRITE permission, so mark the page dirty or it 2431dcf7fe9dSAndrea Arcangeli * could be freed from under us. We could do it 2432dcf7fe9dSAndrea Arcangeli * unconditionally before unlock_page(), but doing it 2433dcf7fe9dSAndrea Arcangeli * only if VM_WRITE is not set is faster. 2434dcf7fe9dSAndrea Arcangeli */ 2435dcf7fe9dSAndrea Arcangeli set_page_dirty(page); 2436dcf7fe9dSAndrea Arcangeli } 24374c27fe4cSMike Rapoport 24384c27fe4cSMike Rapoport dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 2439e2a50c1fSAndrea Arcangeli 2440e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2441e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2442e2a50c1fSAndrea Arcangeli if (unlikely(offset >= max_off)) 24433fea5a49SJohannes Weiner goto out_release_unlock; 2444e2a50c1fSAndrea Arcangeli 2445e2a50c1fSAndrea Arcangeli ret = -EEXIST; 24464c27fe4cSMike Rapoport if (!pte_none(*dst_pte)) 24473fea5a49SJohannes Weiner goto out_release_unlock; 24484c27fe4cSMike Rapoport 24496058eaecSJohannes Weiner lru_cache_add(page); 24504c27fe4cSMike Rapoport 245194b7cc01SYang Shi spin_lock_irq(&info->lock); 24524c27fe4cSMike Rapoport info->alloced++; 24534c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 24544c27fe4cSMike Rapoport shmem_recalc_inode(inode); 245594b7cc01SYang Shi spin_unlock_irq(&info->lock); 24564c27fe4cSMike Rapoport 24574c27fe4cSMike Rapoport inc_mm_counter(dst_mm, mm_counter_file(page)); 24584c27fe4cSMike Rapoport page_add_file_rmap(page, false); 24594c27fe4cSMike Rapoport set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 24604c27fe4cSMike Rapoport 24614c27fe4cSMike Rapoport /* No need to invalidate - it was non-present before */ 24624c27fe4cSMike Rapoport update_mmu_cache(dst_vma, dst_addr, dst_pte); 24634c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2464e2a50c1fSAndrea Arcangeli unlock_page(page); 24654c27fe4cSMike Rapoport ret = 0; 24664c27fe4cSMike Rapoport out: 24674c27fe4cSMike Rapoport return ret; 24683fea5a49SJohannes Weiner out_release_unlock: 24694c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2470dcf7fe9dSAndrea Arcangeli ClearPageDirty(page); 2471e2a50c1fSAndrea Arcangeli delete_from_page_cache(page); 24724c27fe4cSMike Rapoport out_release: 24739cc90c66SAndrea Arcangeli unlock_page(page); 24744c27fe4cSMike Rapoport put_page(page); 24754c27fe4cSMike Rapoport out_unacct_blocks: 24760f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 24774c27fe4cSMike Rapoport goto out; 24784c27fe4cSMike Rapoport } 24794c27fe4cSMike Rapoport 24808d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 24818d103963SMike Rapoport pmd_t *dst_pmd, 24828d103963SMike Rapoport struct vm_area_struct *dst_vma, 24838d103963SMike Rapoport unsigned long dst_addr, 24848d103963SMike Rapoport unsigned long src_addr, 
24858d103963SMike Rapoport struct page **pagep) 24868d103963SMike Rapoport { 24878d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 24888d103963SMike Rapoport dst_addr, src_addr, false, pagep); 24898d103963SMike Rapoport } 24908d103963SMike Rapoport 24918d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, 24928d103963SMike Rapoport pmd_t *dst_pmd, 24938d103963SMike Rapoport struct vm_area_struct *dst_vma, 24948d103963SMike Rapoport unsigned long dst_addr) 24958d103963SMike Rapoport { 24968d103963SMike Rapoport struct page *page = NULL; 24978d103963SMike Rapoport 24988d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 24998d103963SMike Rapoport dst_addr, 0, true, &page); 25008d103963SMike Rapoport } 25018d103963SMike Rapoport 25021da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 250392e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 250469f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 25051da177e4SLinus Torvalds 25066d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 25076d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 25086d9d88d0SJarkko Sakkinen #else 25096d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 25106d9d88d0SJarkko Sakkinen #endif 25116d9d88d0SJarkko Sakkinen 25121da177e4SLinus Torvalds static int 2513800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2514800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2515800d15a5SNick Piggin struct page **pagep, void **fsdata) 25161da177e4SLinus Torvalds { 2517800d15a5SNick Piggin struct inode *inode = mapping->host; 251840e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 251909cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT; 252040e041a2SDavid Herrmann 252140e041a2SDavid Herrmann /* i_mutex is held by caller */ 2522ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW | 2523ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2524ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 252540e041a2SDavid Herrmann return -EPERM; 252640e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 252740e041a2SDavid Herrmann return -EPERM; 252840e041a2SDavid Herrmann } 252940e041a2SDavid Herrmann 25309e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2531800d15a5SNick Piggin } 2532800d15a5SNick Piggin 2533800d15a5SNick Piggin static int 2534800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2535800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2536800d15a5SNick Piggin struct page *page, void *fsdata) 2537800d15a5SNick Piggin { 2538800d15a5SNick Piggin struct inode *inode = mapping->host; 2539800d15a5SNick Piggin 2540800d15a5SNick Piggin if (pos + copied > inode->i_size) 2541800d15a5SNick Piggin i_size_write(inode, pos + copied); 2542800d15a5SNick Piggin 2543ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2544800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2545800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2546800d8c63SKirill A. Shutemov int i; 2547800d8c63SKirill A. Shutemov 2548800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2549800d8c63SKirill A. Shutemov if (head + i == page) 2550800d8c63SKirill A. 
Shutemov continue; 2551800d8c63SKirill A. Shutemov clear_highpage(head + i); 2552800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2553800d8c63SKirill A. Shutemov } 2554800d8c63SKirill A. Shutemov } 255509cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 255609cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2557ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 255809cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2559ec9516fbSHugh Dickins } 2560800d8c63SKirill A. Shutemov SetPageUptodate(head); 2561ec9516fbSHugh Dickins } 2562d3602444SHugh Dickins set_page_dirty(page); 25636746aff7SWu Fengguang unlock_page(page); 256409cbfeafSKirill A. Shutemov put_page(page); 2565d3602444SHugh Dickins 2566800d15a5SNick Piggin return copied; 25671da177e4SLinus Torvalds } 25681da177e4SLinus Torvalds 25692ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 25701da177e4SLinus Torvalds { 25716e58e79dSAl Viro struct file *file = iocb->ki_filp; 25726e58e79dSAl Viro struct inode *inode = file_inode(file); 25731da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 257441ffe5d5SHugh Dickins pgoff_t index; 257541ffe5d5SHugh Dickins unsigned long offset; 2576a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2577f7c1d074SGeert Uytterhoeven int error = 0; 2578cb66a7a1SAl Viro ssize_t retval = 0; 25796e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2580a0ee5ec5SHugh Dickins 2581a0ee5ec5SHugh Dickins /* 2582a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2583a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2584a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 2585a0ee5ec5SHugh Dickins */ 2586777eda2cSAl Viro if (!iter_is_iovec(to)) 258775edd345SHugh Dickins sgp = SGP_CACHE; 25881da177e4SLinus Torvalds 258909cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 259009cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 25911da177e4SLinus Torvalds 25921da177e4SLinus Torvalds for (;;) { 25931da177e4SLinus Torvalds struct page *page = NULL; 259441ffe5d5SHugh Dickins pgoff_t end_index; 259541ffe5d5SHugh Dickins unsigned long nr, ret; 25961da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 25971da177e4SLinus Torvalds 259809cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 25991da177e4SLinus Torvalds if (index > end_index) 26001da177e4SLinus Torvalds break; 26011da177e4SLinus Torvalds if (index == end_index) { 260209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26031da177e4SLinus Torvalds if (nr <= offset) 26041da177e4SLinus Torvalds break; 26051da177e4SLinus Torvalds } 26061da177e4SLinus Torvalds 26079e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 26086e58e79dSAl Viro if (error) { 26096e58e79dSAl Viro if (error == -EINVAL) 26106e58e79dSAl Viro error = 0; 26111da177e4SLinus Torvalds break; 26121da177e4SLinus Torvalds } 261375edd345SHugh Dickins if (page) { 261475edd345SHugh Dickins if (sgp == SGP_CACHE) 261575edd345SHugh Dickins set_page_dirty(page); 2616d3602444SHugh Dickins unlock_page(page); 261775edd345SHugh Dickins } 26181da177e4SLinus Torvalds 26191da177e4SLinus Torvalds /* 26201da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 26211b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 26221da177e4SLinus Torvalds */ 262309cbfeafSKirill A. 
Shutemov nr = PAGE_SIZE; 26241da177e4SLinus Torvalds i_size = i_size_read(inode); 262509cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 26261da177e4SLinus Torvalds if (index == end_index) { 262709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26281da177e4SLinus Torvalds if (nr <= offset) { 26291da177e4SLinus Torvalds if (page) 263009cbfeafSKirill A. Shutemov put_page(page); 26311da177e4SLinus Torvalds break; 26321da177e4SLinus Torvalds } 26331da177e4SLinus Torvalds } 26341da177e4SLinus Torvalds nr -= offset; 26351da177e4SLinus Torvalds 26361da177e4SLinus Torvalds if (page) { 26371da177e4SLinus Torvalds /* 26381da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 26391da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 26401da177e4SLinus Torvalds * before reading the page on the kernel side. 26411da177e4SLinus Torvalds */ 26421da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 26431da177e4SLinus Torvalds flush_dcache_page(page); 26441da177e4SLinus Torvalds /* 26451da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 26461da177e4SLinus Torvalds */ 26471da177e4SLinus Torvalds if (!offset) 26481da177e4SLinus Torvalds mark_page_accessed(page); 2649b5810039SNick Piggin } else { 26501da177e4SLinus Torvalds page = ZERO_PAGE(0); 265109cbfeafSKirill A. Shutemov get_page(page); 2652b5810039SNick Piggin } 26531da177e4SLinus Torvalds 26541da177e4SLinus Torvalds /* 26551da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 26561da177e4SLinus Torvalds * now we can copy it to user space... 26571da177e4SLinus Torvalds */ 26582ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 26596e58e79dSAl Viro retval += ret; 26601da177e4SLinus Torvalds offset += ret; 266109cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 266209cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 26631da177e4SLinus Torvalds 266409cbfeafSKirill A. Shutemov put_page(page); 26652ba5bbedSAl Viro if (!iov_iter_count(to)) 26661da177e4SLinus Torvalds break; 26676e58e79dSAl Viro if (ret < nr) { 26686e58e79dSAl Viro error = -EFAULT; 26696e58e79dSAl Viro break; 26706e58e79dSAl Viro } 26711da177e4SLinus Torvalds cond_resched(); 26721da177e4SLinus Torvalds } 26731da177e4SLinus Torvalds 267409cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 26756e58e79dSAl Viro file_accessed(file); 26766e58e79dSAl Viro return retval ? retval : error; 26771da177e4SLinus Torvalds } 26781da177e4SLinus Torvalds 2679220f2ac9SHugh Dickins /* 26807f4446eeSMatthew Wilcox * llseek SEEK_DATA or SEEK_HOLE through the page cache. 
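 *
 * An index counts as DATA if it holds an uptodate page or a swap
 * entry; a missing page, or a !Uptodate page left behind by
 * fallocate, counts as HOLE.  E.g. (hypothetical sketch):
 *
 *	data = lseek(fd, 0, SEEK_DATA);		- first allocated extent
 *	hole = lseek(fd, data, SEEK_HOLE);	- end of that extent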
2681220f2ac9SHugh Dickins */ 2682220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2683965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2684220f2ac9SHugh Dickins { 2685220f2ac9SHugh Dickins struct page *page; 2686220f2ac9SHugh Dickins struct pagevec pvec; 2687220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2688220f2ac9SHugh Dickins bool done = false; 2689220f2ac9SHugh Dickins int i; 2690220f2ac9SHugh Dickins 269186679820SMel Gorman pagevec_init(&pvec); 2692220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2693220f2ac9SHugh Dickins while (!done) { 26940cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2695220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2696220f2ac9SHugh Dickins if (!pvec.nr) { 2697965c8e59SAndrew Morton if (whence == SEEK_DATA) 2698220f2ac9SHugh Dickins index = end; 2699220f2ac9SHugh Dickins break; 2700220f2ac9SHugh Dickins } 2701220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2702220f2ac9SHugh Dickins if (index < indices[i]) { 2703965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2704220f2ac9SHugh Dickins done = true; 2705220f2ac9SHugh Dickins break; 2706220f2ac9SHugh Dickins } 2707220f2ac9SHugh Dickins index = indices[i]; 2708220f2ac9SHugh Dickins } 2709220f2ac9SHugh Dickins page = pvec.pages[i]; 27103159f943SMatthew Wilcox if (page && !xa_is_value(page)) { 2711220f2ac9SHugh Dickins if (!PageUptodate(page)) 2712220f2ac9SHugh Dickins page = NULL; 2713220f2ac9SHugh Dickins } 2714220f2ac9SHugh Dickins if (index >= end || 2715965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2716965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2717220f2ac9SHugh Dickins done = true; 2718220f2ac9SHugh Dickins break; 2719220f2ac9SHugh Dickins } 2720220f2ac9SHugh Dickins } 27210cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2722220f2ac9SHugh Dickins pagevec_release(&pvec); 2723220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2724220f2ac9SHugh Dickins cond_resched(); 2725220f2ac9SHugh Dickins } 2726220f2ac9SHugh Dickins return index; 2727220f2ac9SHugh Dickins } 2728220f2ac9SHugh Dickins 2729965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2730220f2ac9SHugh Dickins { 2731220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2732220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2733220f2ac9SHugh Dickins pgoff_t start, end; 2734220f2ac9SHugh Dickins loff_t new_offset; 2735220f2ac9SHugh Dickins 2736965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2737965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2738220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 27395955102cSAl Viro inode_lock(inode); 2740220f2ac9SHugh Dickins /* We're holding i_mutex so we can access i_size directly */ 2741220f2ac9SHugh Dickins 27421a413646SYufen Yu if (offset < 0 || offset >= inode->i_size) 2743220f2ac9SHugh Dickins offset = -ENXIO; 2744220f2ac9SHugh Dickins else { 274509cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 274609cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2747965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 274809cbfeafSKirill A. 
Shutemov new_offset <<= PAGE_SHIFT; 2749220f2ac9SHugh Dickins if (new_offset > offset) { 2750220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2751220f2ac9SHugh Dickins offset = new_offset; 2752965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2753220f2ac9SHugh Dickins offset = -ENXIO; 2754220f2ac9SHugh Dickins else 2755220f2ac9SHugh Dickins offset = inode->i_size; 2756220f2ac9SHugh Dickins } 2757220f2ac9SHugh Dickins } 2758220f2ac9SHugh Dickins 2759387aae6fSHugh Dickins if (offset >= 0) 276046a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 27615955102cSAl Viro inode_unlock(inode); 2762220f2ac9SHugh Dickins return offset; 2763220f2ac9SHugh Dickins } 2764220f2ac9SHugh Dickins 276583e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 276683e4fa9cSHugh Dickins loff_t len) 276783e4fa9cSHugh Dickins { 2768496ad9aaSAl Viro struct inode *inode = file_inode(file); 2769e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 277040e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 27711aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2772e2d12e22SHugh Dickins pgoff_t start, index, end; 2773e2d12e22SHugh Dickins int error; 277483e4fa9cSHugh Dickins 277513ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 277613ace4d0SHugh Dickins return -EOPNOTSUPP; 277713ace4d0SHugh Dickins 27785955102cSAl Viro inode_lock(inode); 277983e4fa9cSHugh Dickins 278083e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 278183e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 278283e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 278383e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 27848e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 278583e4fa9cSHugh Dickins 278640e041a2SDavid Herrmann /* protected by i_mutex */ 2787ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 278840e041a2SDavid Herrmann error = -EPERM; 278940e041a2SDavid Herrmann goto out; 279040e041a2SDavid Herrmann } 279140e041a2SDavid Herrmann 27928e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2793aa71ecd8SChen Jun shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 2794f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2795f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2796f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2797f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2798f00cdc6dSHugh Dickins 279983e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 280083e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 280183e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 280283e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 280383e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 28048e205f77SHugh Dickins 28058e205f77SHugh Dickins spin_lock(&inode->i_lock); 28068e205f77SHugh Dickins inode->i_private = NULL; 28078e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 28082055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 28098e205f77SHugh Dickins spin_unlock(&inode->i_lock); 281083e4fa9cSHugh Dickins error = 0; 28118e205f77SHugh Dickins goto out; 281283e4fa9cSHugh Dickins } 281383e4fa9cSHugh Dickins 2814e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2815e2d12e22SHugh Dickins error = inode_newsize_ok(inode, 
offset + len); 2816e2d12e22SHugh Dickins if (error) 2817e2d12e22SHugh Dickins goto out; 2818e2d12e22SHugh Dickins 281940e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 282040e041a2SDavid Herrmann error = -EPERM; 282140e041a2SDavid Herrmann goto out; 282240e041a2SDavid Herrmann } 282340e041a2SDavid Herrmann 282409cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 282509cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2826e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2827e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2828e2d12e22SHugh Dickins error = -ENOSPC; 2829e2d12e22SHugh Dickins goto out; 2830e2d12e22SHugh Dickins } 2831e2d12e22SHugh Dickins 28328e205f77SHugh Dickins shmem_falloc.waitq = NULL; 28331aac1400SHugh Dickins shmem_falloc.start = start; 28341aac1400SHugh Dickins shmem_falloc.next = start; 28351aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 28361aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 28371aac1400SHugh Dickins spin_lock(&inode->i_lock); 28381aac1400SHugh Dickins inode->i_private = &shmem_falloc; 28391aac1400SHugh Dickins spin_unlock(&inode->i_lock); 28401aac1400SHugh Dickins 2841e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2842e2d12e22SHugh Dickins struct page *page; 2843e2d12e22SHugh Dickins 2844e2d12e22SHugh Dickins /* 2845e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2846e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2847e2d12e22SHugh Dickins */ 2848e2d12e22SHugh Dickins if (signal_pending(current)) 2849e2d12e22SHugh Dickins error = -EINTR; 28501aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 28511aac1400SHugh Dickins error = -ENOMEM; 2852e2d12e22SHugh Dickins else 28539e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2854e2d12e22SHugh Dickins if (error) { 28551635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 28567f556567SHugh Dickins if (index > start) { 28571635f6a7SHugh Dickins shmem_undo_range(inode, 285809cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2859b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 28607f556567SHugh Dickins } 28611aac1400SHugh Dickins goto undone; 2862e2d12e22SHugh Dickins } 2863e2d12e22SHugh Dickins 2864e2d12e22SHugh Dickins /* 28651aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 28661aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 28671aac1400SHugh Dickins */ 28681aac1400SHugh Dickins shmem_falloc.next++; 28691aac1400SHugh Dickins if (!PageUptodate(page)) 28701aac1400SHugh Dickins shmem_falloc.nr_falloced++; 28711aac1400SHugh Dickins 28721aac1400SHugh Dickins /* 28731635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 28741635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 28751635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2876e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2877e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2878e2d12e22SHugh Dickins */ 2879e2d12e22SHugh Dickins set_page_dirty(page); 2880e2d12e22SHugh Dickins unlock_page(page); 288109cbfeafSKirill A. 
Shutemov put_page(page); 2882e2d12e22SHugh Dickins cond_resched(); 2883e2d12e22SHugh Dickins } 2884e2d12e22SHugh Dickins 2885e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2886e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2887078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 28881aac1400SHugh Dickins undone: 28891aac1400SHugh Dickins spin_lock(&inode->i_lock); 28901aac1400SHugh Dickins inode->i_private = NULL; 28911aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2892e2d12e22SHugh Dickins out: 28935955102cSAl Viro inode_unlock(inode); 289483e4fa9cSHugh Dickins return error; 289583e4fa9cSHugh Dickins } 289683e4fa9cSHugh Dickins 2897726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 28981da177e4SLinus Torvalds { 2899726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 29001da177e4SLinus Torvalds 29011da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 290209cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 29031da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 29040edd73b3SHugh Dickins if (sbinfo->max_blocks) { 29051da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 290641ffe5d5SHugh Dickins buf->f_bavail = 290741ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 290841ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 29090edd73b3SHugh Dickins } 29100edd73b3SHugh Dickins if (sbinfo->max_inodes) { 29111da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 29121da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 29131da177e4SLinus Torvalds } 29141da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 29151da177e4SLinus Torvalds return 0; 29161da177e4SLinus Torvalds } 29171da177e4SLinus Torvalds 29181da177e4SLinus Torvalds /* 29191da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
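 * (All the creation entry points below - mknod, create, mkdir,
 * tmpfile, symlink, whiteout - funnel through shmem_get_inode(),
 * differing only in mode bits and post-allocation setup.)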
29201da177e4SLinus Torvalds */ 29211da177e4SLinus Torvalds static int 2922*549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir, 2923*549c7297SChristian Brauner struct dentry *dentry, umode_t mode, dev_t dev) 29241da177e4SLinus Torvalds { 29250b0a0806SHugh Dickins struct inode *inode; 29261da177e4SLinus Torvalds int error = -ENOSPC; 29271da177e4SLinus Torvalds 2928454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 29291da177e4SLinus Torvalds if (inode) { 2930feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2931feda821eSChristoph Hellwig if (error) 2932feda821eSChristoph Hellwig goto out_iput; 29332a7dba39SEric Paris error = security_inode_init_security(inode, dir, 29349d8f13baSMimi Zohar &dentry->d_name, 29356d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2936feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2937feda821eSChristoph Hellwig goto out_iput; 293837ec43cdSMimi Zohar 2939718deb6bSAl Viro error = 0; 29401da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2941078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 29421da177e4SLinus Torvalds d_instantiate(dentry, inode); 29431da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 29441da177e4SLinus Torvalds } 29451da177e4SLinus Torvalds return error; 2946feda821eSChristoph Hellwig out_iput: 2947feda821eSChristoph Hellwig iput(inode); 2948feda821eSChristoph Hellwig return error; 29491da177e4SLinus Torvalds } 29501da177e4SLinus Torvalds 295160545d0dSAl Viro static int 2952*549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 2953*549c7297SChristian Brauner struct dentry *dentry, umode_t mode) 295460545d0dSAl Viro { 295560545d0dSAl Viro struct inode *inode; 295660545d0dSAl Viro int error = -ENOSPC; 295760545d0dSAl Viro 295860545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 295960545d0dSAl Viro if (inode) { 296060545d0dSAl Viro error = security_inode_init_security(inode, dir, 296160545d0dSAl Viro NULL, 296260545d0dSAl Viro shmem_initxattrs, NULL); 2963feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2964feda821eSChristoph Hellwig goto out_iput; 2965feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2966feda821eSChristoph Hellwig if (error) 2967feda821eSChristoph Hellwig goto out_iput; 296860545d0dSAl Viro d_tmpfile(dentry, inode); 296960545d0dSAl Viro } 297060545d0dSAl Viro return error; 2971feda821eSChristoph Hellwig out_iput: 2972feda821eSChristoph Hellwig iput(inode); 2973feda821eSChristoph Hellwig return error; 297460545d0dSAl Viro } 297560545d0dSAl Viro 2976*549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 2977*549c7297SChristian Brauner struct dentry *dentry, umode_t mode) 29781da177e4SLinus Torvalds { 29791da177e4SLinus Torvalds int error; 29801da177e4SLinus Torvalds 2981*549c7297SChristian Brauner if ((error = shmem_mknod(&init_user_ns, dir, dentry, 2982*549c7297SChristian Brauner mode | S_IFDIR, 0))) 29831da177e4SLinus Torvalds return error; 2984d8c76e6fSDave Hansen inc_nlink(dir); 29851da177e4SLinus Torvalds return 0; 29861da177e4SLinus Torvalds } 29871da177e4SLinus Torvalds 2988*549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir, 2989*549c7297SChristian Brauner struct dentry *dentry, umode_t mode, bool excl) 29901da177e4SLinus Torvalds { 2991*549c7297SChristian 
Brauner return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0); 29921da177e4SLinus Torvalds } 29931da177e4SLinus Torvalds 29941da177e4SLinus Torvalds /* 29951da177e4SLinus Torvalds * Link a file.. 29961da177e4SLinus Torvalds */ 29971da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 29981da177e4SLinus Torvalds { 299975c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 300029b00e60SDarrick J. Wong int ret = 0; 30011da177e4SLinus Torvalds 30021da177e4SLinus Torvalds /* 30031da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 30041da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 30051da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 30061062af92SDarrick J. Wong * But if an O_TMPFILE file is linked into the tmpfs, the 30071062af92SDarrick J. Wong * first link must skip that, to get the accounting right. 30081da177e4SLinus Torvalds */ 30091062af92SDarrick J. Wong if (inode->i_nlink) { 3010e809d5f0SChris Down ret = shmem_reserve_inode(inode->i_sb, NULL); 30115b04c689SPavel Emelyanov if (ret) 30125b04c689SPavel Emelyanov goto out; 30131062af92SDarrick J. Wong } 30141da177e4SLinus Torvalds 30151da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3016078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3017d8c76e6fSDave Hansen inc_nlink(inode); 30187de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 30191da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 30201da177e4SLinus Torvalds d_instantiate(dentry, inode); 30215b04c689SPavel Emelyanov out: 30225b04c689SPavel Emelyanov return ret; 30231da177e4SLinus Torvalds } 30241da177e4SLinus Torvalds 30251da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 30261da177e4SLinus Torvalds { 302775c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 30281da177e4SLinus Torvalds 30295b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 30305b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 30311da177e4SLinus Torvalds 30321da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 3033078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 30349a53c3a7SDave Hansen drop_nlink(inode); 30351da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 30361da177e4SLinus Torvalds return 0; 30371da177e4SLinus Torvalds } 30381da177e4SLinus Torvalds 30391da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 30401da177e4SLinus Torvalds { 30411da177e4SLinus Torvalds if (!simple_empty(dentry)) 30421da177e4SLinus Torvalds return -ENOTEMPTY; 30431da177e4SLinus Torvalds 304475c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 30459a53c3a7SDave Hansen drop_nlink(dir); 30461da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 30471da177e4SLinus Torvalds } 30481da177e4SLinus Torvalds 304937456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 305037456771SMiklos Szeredi { 3051e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 3052e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 305337456771SMiklos Szeredi 305437456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 305537456771SMiklos Szeredi if 
(old_is_dir) { 305637456771SMiklos Szeredi drop_nlink(old_dir); 305737456771SMiklos Szeredi inc_nlink(new_dir); 305837456771SMiklos Szeredi } else { 305937456771SMiklos Szeredi drop_nlink(new_dir); 306037456771SMiklos Szeredi inc_nlink(old_dir); 306137456771SMiklos Szeredi } 306237456771SMiklos Szeredi } 306337456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 306437456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 306575c3cfa8SDavid Howells d_inode(old_dentry)->i_ctime = 3066078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 306737456771SMiklos Szeredi 306837456771SMiklos Szeredi return 0; 306937456771SMiklos Szeredi } 307037456771SMiklos Szeredi 3071*549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns, 3072*549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry) 307346fdb794SMiklos Szeredi { 307446fdb794SMiklos Szeredi struct dentry *whiteout; 307546fdb794SMiklos Szeredi int error; 307646fdb794SMiklos Szeredi 307746fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 307846fdb794SMiklos Szeredi if (!whiteout) 307946fdb794SMiklos Szeredi return -ENOMEM; 308046fdb794SMiklos Szeredi 3081*549c7297SChristian Brauner error = shmem_mknod(&init_user_ns, old_dir, whiteout, 308246fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 308346fdb794SMiklos Szeredi dput(whiteout); 308446fdb794SMiklos Szeredi if (error) 308546fdb794SMiklos Szeredi return error; 308646fdb794SMiklos Szeredi 308746fdb794SMiklos Szeredi /* 308846fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 308946fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 309046fdb794SMiklos Szeredi * 309146fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 309246fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 309346fdb794SMiklos Szeredi */ 309446fdb794SMiklos Szeredi d_rehash(whiteout); 309546fdb794SMiklos Szeredi return 0; 309646fdb794SMiklos Szeredi } 309746fdb794SMiklos Szeredi 30981da177e4SLinus Torvalds /* 30991da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename; 31001da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 31011da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it 31021da177e4SLinus Torvalds * gets overwritten.
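 *
 * For instance (hypothetical userspace sketch), an overlayfs-style
 * whiteout rename reaches shmem_whiteout() above via:
 *
 *	renameat2(dfd, "old", dfd, "new", RENAME_WHITEOUT);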
31031da177e4SLinus Torvalds */ 3104*549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns, 3105*549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry, 3106*549c7297SChristian Brauner struct inode *new_dir, struct dentry *new_dentry, 3107*549c7297SChristian Brauner unsigned int flags) 31081da177e4SLinus Torvalds { 310975c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 31101da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 31111da177e4SLinus Torvalds 311246fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 31133b69ff51SMiklos Szeredi return -EINVAL; 31143b69ff51SMiklos Szeredi 311537456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 311637456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 311737456771SMiklos Szeredi 31181da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 31191da177e4SLinus Torvalds return -ENOTEMPTY; 31201da177e4SLinus Torvalds 312146fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 312246fdb794SMiklos Szeredi int error; 312346fdb794SMiklos Szeredi 3124*549c7297SChristian Brauner error = shmem_whiteout(&init_user_ns, old_dir, old_dentry); 312546fdb794SMiklos Szeredi if (error) 312646fdb794SMiklos Szeredi return error; 312746fdb794SMiklos Szeredi } 312846fdb794SMiklos Szeredi 312975c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 31301da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3131b928095bSMiklos Szeredi if (they_are_dirs) { 313275c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 31339a53c3a7SDave Hansen drop_nlink(old_dir); 3134b928095bSMiklos Szeredi } 31351da177e4SLinus Torvalds } else if (they_are_dirs) { 31369a53c3a7SDave Hansen drop_nlink(old_dir); 3137d8c76e6fSDave Hansen inc_nlink(new_dir); 31381da177e4SLinus Torvalds } 31391da177e4SLinus Torvalds 31401da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 31411da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 31421da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 31431da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 3144078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 31451da177e4SLinus Torvalds return 0; 31461da177e4SLinus Torvalds } 31471da177e4SLinus Torvalds 3148*549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir, 3149*549c7297SChristian Brauner struct dentry *dentry, const char *symname) 31501da177e4SLinus Torvalds { 31511da177e4SLinus Torvalds int error; 31521da177e4SLinus Torvalds int len; 31531da177e4SLinus Torvalds struct inode *inode; 31549276aad6SHugh Dickins struct page *page; 31551da177e4SLinus Torvalds 31561da177e4SLinus Torvalds len = strlen(symname) + 1; 315709cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 31581da177e4SLinus Torvalds return -ENAMETOOLONG; 31591da177e4SLinus Torvalds 31600825a6f9SJoe Perches inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 31610825a6f9SJoe Perches VM_NORESERVE); 31621da177e4SLinus Torvalds if (!inode) 31631da177e4SLinus Torvalds return -ENOSPC; 31641da177e4SLinus Torvalds 31659d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 31666d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3167343c3d7fSMateusz Nosek if (error && error != -EOPNOTSUPP) { 3168570bc1c2SStephen Smalley iput(inode); 3169570bc1c2SStephen Smalley return error; 3170570bc1c2SStephen Smalley } 3171570bc1c2SStephen Smalley 31721da177e4SLinus Torvalds inode->i_size = len-1; 317369f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 31743ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 31753ed47db3SAl Viro if (!inode->i_link) { 317669f07ec9SHugh Dickins iput(inode); 317769f07ec9SHugh Dickins return -ENOMEM; 317869f07ec9SHugh Dickins } 317969f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 31801da177e4SLinus Torvalds } else { 3181e8ecde25SAl Viro inode_nohighmem(inode); 31829e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 31831da177e4SLinus Torvalds if (error) { 31841da177e4SLinus Torvalds iput(inode); 31851da177e4SLinus Torvalds return error; 31861da177e4SLinus Torvalds } 318714fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 31881da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 318921fc61c7SAl Viro memcpy(page_address(page), symname, len); 3190ec9516fbSHugh Dickins SetPageUptodate(page); 31911da177e4SLinus Torvalds set_page_dirty(page); 31926746aff7SWu Fengguang unlock_page(page); 319309cbfeafSKirill A. 
Shutemov put_page(page); 31941da177e4SLinus Torvalds } 31951da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3196078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 31971da177e4SLinus Torvalds d_instantiate(dentry, inode); 31981da177e4SLinus Torvalds dget(dentry); 31991da177e4SLinus Torvalds return 0; 32001da177e4SLinus Torvalds } 32011da177e4SLinus Torvalds 3202fceef393SAl Viro static void shmem_put_link(void *arg) 3203fceef393SAl Viro { 3204fceef393SAl Viro mark_page_accessed(arg); 3205fceef393SAl Viro put_page(arg); 3206fceef393SAl Viro } 3207fceef393SAl Viro 32086b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3209fceef393SAl Viro struct inode *inode, 3210fceef393SAl Viro struct delayed_call *done) 32111da177e4SLinus Torvalds { 32121da177e4SLinus Torvalds struct page *page = NULL; 32136b255391SAl Viro int error; 32146a6c9904SAl Viro if (!dentry) { 32156a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 32166a6c9904SAl Viro if (!page) 32176b255391SAl Viro return ERR_PTR(-ECHILD); 32186a6c9904SAl Viro if (!PageUptodate(page)) { 32196a6c9904SAl Viro put_page(page); 32206a6c9904SAl Viro return ERR_PTR(-ECHILD); 32216a6c9904SAl Viro } 32226a6c9904SAl Viro } else { 32239e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3224680baacbSAl Viro if (error) 3225680baacbSAl Viro return ERR_PTR(error); 3226d3602444SHugh Dickins unlock_page(page); 32271da177e4SLinus Torvalds } 3228fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 322921fc61c7SAl Viro return page_address(page); 32301da177e4SLinus Torvalds } 32311da177e4SLinus Torvalds 3232b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3233b09e0fa4SEric Paris /* 3234b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3235b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3236b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3237b09e0fa4SEric Paris * filesystem level, though. 3238b09e0fa4SEric Paris */ 3239b09e0fa4SEric Paris 32406d9d88d0SJarkko Sakkinen /* 32416d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
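 *
 * Each entry is copied into a simple_xattr with the "security."
 * prefix prepended: e.g. an LSM-supplied "selinux" attribute is
 * stored as "security.selinux" (illustrative example).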
32426d9d88d0SJarkko Sakkinen */ 32436d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 32446d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 32456d9d88d0SJarkko Sakkinen void *fs_info) 32466d9d88d0SJarkko Sakkinen { 32476d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 32486d9d88d0SJarkko Sakkinen const struct xattr *xattr; 324938f38657SAristeu Rozanski struct simple_xattr *new_xattr; 32506d9d88d0SJarkko Sakkinen size_t len; 32516d9d88d0SJarkko Sakkinen 32526d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 325338f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 32546d9d88d0SJarkko Sakkinen if (!new_xattr) 32556d9d88d0SJarkko Sakkinen return -ENOMEM; 32566d9d88d0SJarkko Sakkinen 32576d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 32586d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 32596d9d88d0SJarkko Sakkinen GFP_KERNEL); 32606d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 32613bef735aSChengguang Xu kvfree(new_xattr); 32626d9d88d0SJarkko Sakkinen return -ENOMEM; 32636d9d88d0SJarkko Sakkinen } 32646d9d88d0SJarkko Sakkinen 32656d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 32666d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 32676d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 32686d9d88d0SJarkko Sakkinen xattr->name, len); 32696d9d88d0SJarkko Sakkinen 327038f38657SAristeu Rozanski simple_xattr_list_add(&info->xattrs, new_xattr); 32716d9d88d0SJarkko Sakkinen } 32726d9d88d0SJarkko Sakkinen 32736d9d88d0SJarkko Sakkinen return 0; 32746d9d88d0SJarkko Sakkinen } 32756d9d88d0SJarkko Sakkinen 3276aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3277b296821aSAl Viro struct dentry *unused, struct inode *inode, 3278b296821aSAl Viro const char *name, void *buffer, size_t size) 3279aa7c5241SAndreas Gruenbacher { 3280b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3281aa7c5241SAndreas Gruenbacher 3282aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3283aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3284aa7c5241SAndreas Gruenbacher } 3285aa7c5241SAndreas Gruenbacher 3286aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 3287e65ce2a5SChristian Brauner struct user_namespace *mnt_userns, 328859301226SAl Viro struct dentry *unused, struct inode *inode, 328959301226SAl Viro const char *name, const void *value, 329059301226SAl Viro size_t size, int flags) 3291aa7c5241SAndreas Gruenbacher { 329259301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3293aa7c5241SAndreas Gruenbacher 3294aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3295a46a2295SDaniel Xu return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); 3296aa7c5241SAndreas Gruenbacher } 3297aa7c5241SAndreas Gruenbacher 3298aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3299aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3300aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3301aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3302aa7c5241SAndreas Gruenbacher }; 3303aa7c5241SAndreas Gruenbacher 3304aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3305aa7c5241SAndreas Gruenbacher 
.prefix = XATTR_TRUSTED_PREFIX, 3306aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3307aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3308aa7c5241SAndreas Gruenbacher }; 3309aa7c5241SAndreas Gruenbacher 3310b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3311b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3312feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3313feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3314b09e0fa4SEric Paris #endif 3315aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3316aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3317b09e0fa4SEric Paris NULL 3318b09e0fa4SEric Paris }; 3319b09e0fa4SEric Paris 3320b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3321b09e0fa4SEric Paris { 332275c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3323786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3324b09e0fa4SEric Paris } 3325b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3326b09e0fa4SEric Paris 332769f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 33286b255391SAl Viro .get_link = simple_get_link, 3329b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3330b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3331b09e0fa4SEric Paris #endif 33321da177e4SLinus Torvalds }; 33331da177e4SLinus Torvalds 333492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 33356b255391SAl Viro .get_link = shmem_get_link, 3336b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3337b09e0fa4SEric Paris .listxattr = shmem_listxattr, 333839f0247dSAndreas Gruenbacher #endif 3339b09e0fa4SEric Paris }; 334039f0247dSAndreas Gruenbacher 334191828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 334291828a40SDavid M. Grimes { 334391828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 334491828a40SDavid M. Grimes } 334591828a40SDavid M. Grimes 334691828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 334791828a40SDavid M. Grimes { 334891828a40SDavid M. Grimes __u32 *fh = vfh; 334991828a40SDavid M. Grimes __u64 inum = fh[2]; 335091828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 335191828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 335291828a40SDavid M. Grimes } 335391828a40SDavid M. Grimes 335412ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */ 335512ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode) 335612ba780dSAmir Goldstein { 335712ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode); 335812ba780dSAmir Goldstein 335912ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode); 336012ba780dSAmir Goldstein } 336112ba780dSAmir Goldstein 336212ba780dSAmir Goldstein 3363480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3364480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 336591828a40SDavid M. Grimes { 336691828a40SDavid M. Grimes struct inode *inode; 3367480b116cSChristoph Hellwig struct dentry *dentry = NULL; 336835c2a7f4SHugh Dickins u64 inum; 336991828a40SDavid M. 
Grimes 3370480b116cSChristoph Hellwig if (fh_len < 3) 3371480b116cSChristoph Hellwig return NULL; 3372480b116cSChristoph Hellwig 337335c2a7f4SHugh Dickins inum = fid->raw[2]; 337435c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 337535c2a7f4SHugh Dickins 3376480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3377480b116cSChristoph Hellwig shmem_match, fid->raw); 337891828a40SDavid M. Grimes if (inode) { 337912ba780dSAmir Goldstein dentry = shmem_find_alias(inode); 338091828a40SDavid M. Grimes iput(inode); 338191828a40SDavid M. Grimes } 338291828a40SDavid M. Grimes 3383480b116cSChristoph Hellwig return dentry; 338491828a40SDavid M. Grimes } 338591828a40SDavid M. Grimes 3386b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3387b0b0382bSAl Viro struct inode *parent) 338891828a40SDavid M. Grimes { 33895fe0c237SAneesh Kumar K.V if (*len < 3) { 33905fe0c237SAneesh Kumar K.V *len = 3; 339194e07a75SNamjae Jeon return FILEID_INVALID; 33925fe0c237SAneesh Kumar K.V } 339391828a40SDavid M. Grimes 33941d3382cbSAl Viro if (inode_unhashed(inode)) { 339591828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 339691828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 339791828a40SDavid M. Grimes * time, we need a lock to ensure we only try 339891828a40SDavid M. Grimes * to do it once 339991828a40SDavid M. Grimes */ 340091828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 340191828a40SDavid M. Grimes spin_lock(&lock); 34021d3382cbSAl Viro if (inode_unhashed(inode)) 340391828a40SDavid M. Grimes __insert_inode_hash(inode, 340491828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 340591828a40SDavid M. Grimes spin_unlock(&lock); 340691828a40SDavid M. Grimes } 340791828a40SDavid M. Grimes 340891828a40SDavid M. Grimes fh[0] = inode->i_generation; 340991828a40SDavid M. Grimes fh[1] = inode->i_ino; 341091828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 341191828a40SDavid M. Grimes 341291828a40SDavid M. Grimes *len = 3; 341391828a40SDavid M. Grimes return 1; 341491828a40SDavid M. Grimes } 341591828a40SDavid M. Grimes 341639655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 341791828a40SDavid M. Grimes .get_parent = shmem_get_parent, 341891828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3419480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 342091828a40SDavid M. Grimes }; 342191828a40SDavid M. 
Grimes 3422626c3920SAl Viro enum shmem_param { 3423626c3920SAl Viro Opt_gid, 3424626c3920SAl Viro Opt_huge, 3425626c3920SAl Viro Opt_mode, 3426626c3920SAl Viro Opt_mpol, 3427626c3920SAl Viro Opt_nr_blocks, 3428626c3920SAl Viro Opt_nr_inodes, 3429626c3920SAl Viro Opt_size, 3430626c3920SAl Viro Opt_uid, 3431ea3271f7SChris Down Opt_inode32, 3432ea3271f7SChris Down Opt_inode64, 3433626c3920SAl Viro }; 34341da177e4SLinus Torvalds 34355eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = { 34362710c957SAl Viro {"never", SHMEM_HUGE_NEVER }, 34372710c957SAl Viro {"always", SHMEM_HUGE_ALWAYS }, 34382710c957SAl Viro {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 34392710c957SAl Viro {"advise", SHMEM_HUGE_ADVISE }, 34402710c957SAl Viro {} 34412710c957SAl Viro }; 34422710c957SAl Viro 3443d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = { 3444626c3920SAl Viro fsparam_u32 ("gid", Opt_gid), 34452710c957SAl Viro fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 3446626c3920SAl Viro fsparam_u32oct("mode", Opt_mode), 3447626c3920SAl Viro fsparam_string("mpol", Opt_mpol), 3448626c3920SAl Viro fsparam_string("nr_blocks", Opt_nr_blocks), 3449626c3920SAl Viro fsparam_string("nr_inodes", Opt_nr_inodes), 3450626c3920SAl Viro fsparam_string("size", Opt_size), 3451626c3920SAl Viro fsparam_u32 ("uid", Opt_uid), 3452ea3271f7SChris Down fsparam_flag ("inode32", Opt_inode32), 3453ea3271f7SChris Down fsparam_flag ("inode64", Opt_inode64), 3454626c3920SAl Viro {} 3455626c3920SAl Viro }; 3456626c3920SAl Viro 3457f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 3458626c3920SAl Viro { 3459f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3460626c3920SAl Viro struct fs_parse_result result; 3461e04dc423SAl Viro unsigned long long size; 3462626c3920SAl Viro char *rest; 3463626c3920SAl Viro int opt; 3464626c3920SAl Viro 3465d7167b14SAl Viro opt = fs_parse(fc, shmem_fs_parameters, param, &result); 3466f3235626SDavid Howells if (opt < 0) 3467626c3920SAl Viro return opt; 3468626c3920SAl Viro 3469626c3920SAl Viro switch (opt) { 3470626c3920SAl Viro case Opt_size: 3471626c3920SAl Viro size = memparse(param->string, &rest); 3472e04dc423SAl Viro if (*rest == '%') { 3473e04dc423SAl Viro size <<= PAGE_SHIFT; 3474e04dc423SAl Viro size *= totalram_pages(); 3475e04dc423SAl Viro do_div(size, 100); 3476e04dc423SAl Viro rest++; 3477e04dc423SAl Viro } 3478e04dc423SAl Viro if (*rest) 3479626c3920SAl Viro goto bad_value; 3480e04dc423SAl Viro ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3481e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3482626c3920SAl Viro break; 3483626c3920SAl Viro case Opt_nr_blocks: 3484626c3920SAl Viro ctx->blocks = memparse(param->string, &rest); 3485e04dc423SAl Viro if (*rest) 3486626c3920SAl Viro goto bad_value; 3487e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3488626c3920SAl Viro break; 3489626c3920SAl Viro case Opt_nr_inodes: 3490626c3920SAl Viro ctx->inodes = memparse(param->string, &rest); 3491e04dc423SAl Viro if (*rest) 3492626c3920SAl Viro goto bad_value; 3493e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_INODES; 3494626c3920SAl Viro break; 3495626c3920SAl Viro case Opt_mode: 3496626c3920SAl Viro ctx->mode = result.uint_32 & 07777; 3497626c3920SAl Viro break; 3498626c3920SAl Viro case Opt_uid: 3499626c3920SAl Viro ctx->uid = make_kuid(current_user_ns(), result.uint_32); 3500e04dc423SAl Viro if (!uid_valid(ctx->uid)) 3501626c3920SAl Viro goto bad_value; 3502626c3920SAl Viro break; 3503626c3920SAl 
Viro case Opt_gid: 3504626c3920SAl Viro ctx->gid = make_kgid(current_user_ns(), result.uint_32); 3505e04dc423SAl Viro if (!gid_valid(ctx->gid)) 3506626c3920SAl Viro goto bad_value; 3507626c3920SAl Viro break; 3508626c3920SAl Viro case Opt_huge: 3509626c3920SAl Viro ctx->huge = result.uint_32; 3510626c3920SAl Viro if (ctx->huge != SHMEM_HUGE_NEVER && 3511396bcc52SMatthew Wilcox (Oracle) !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 3512626c3920SAl Viro has_transparent_hugepage())) 3513626c3920SAl Viro goto unsupported_parameter; 3514e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_HUGE; 3515626c3920SAl Viro break; 3516626c3920SAl Viro case Opt_mpol: 3517626c3920SAl Viro if (IS_ENABLED(CONFIG_NUMA)) { 3518e04dc423SAl Viro mpol_put(ctx->mpol); 3519e04dc423SAl Viro ctx->mpol = NULL; 3520626c3920SAl Viro if (mpol_parse_str(param->string, &ctx->mpol)) 3521626c3920SAl Viro goto bad_value; 3522626c3920SAl Viro break; 3523626c3920SAl Viro } 3524626c3920SAl Viro goto unsupported_parameter; 3525ea3271f7SChris Down case Opt_inode32: 3526ea3271f7SChris Down ctx->full_inums = false; 3527ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3528ea3271f7SChris Down break; 3529ea3271f7SChris Down case Opt_inode64: 3530ea3271f7SChris Down if (sizeof(ino_t) < 8) { 3531ea3271f7SChris Down return invalfc(fc, 3532ea3271f7SChris Down "Cannot use inode64 with <64bit inums in kernel\n"); 3533ea3271f7SChris Down } 3534ea3271f7SChris Down ctx->full_inums = true; 3535ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3536ea3271f7SChris Down break; 3537e04dc423SAl Viro } 3538e04dc423SAl Viro return 0; 3539e04dc423SAl Viro 3540626c3920SAl Viro unsupported_parameter: 3541f35aa2bcSAl Viro return invalfc(fc, "Unsupported parameter '%s'", param->key); 3542626c3920SAl Viro bad_value: 3543f35aa2bcSAl Viro return invalfc(fc, "Bad value for '%s'", param->key); 3544e04dc423SAl Viro } 3545e04dc423SAl Viro 3546f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data) 3547e04dc423SAl Viro { 3548f3235626SDavid Howells char *options = data; 3549f3235626SDavid Howells 355033f37c64SAl Viro if (options) { 355133f37c64SAl Viro int err = security_sb_eat_lsm_opts(options, &fc->security); 355233f37c64SAl Viro if (err) 355333f37c64SAl Viro return err; 355433f37c64SAl Viro } 355533f37c64SAl Viro 3556b00dc3adSHugh Dickins while (options != NULL) { 3557626c3920SAl Viro char *this_char = options; 3558b00dc3adSHugh Dickins for (;;) { 3559b00dc3adSHugh Dickins /* 3560b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3561b00dc3adSHugh Dickins * mount options form a comma-separated list, 3562b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
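 * For example, "mpol=bind:0-3,5,size=1g" must be split at the comma
 * before "size", but not inside the "0-3,5" nodelist: the isdigit()
 * check below keeps any comma that is followed by a digit within the
 * current option.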
3563b00dc3adSHugh Dickins */ 3564b00dc3adSHugh Dickins options = strchr(options, ','); 3565b00dc3adSHugh Dickins if (options == NULL) 3566b00dc3adSHugh Dickins break; 3567b00dc3adSHugh Dickins options++; 3568b00dc3adSHugh Dickins if (!isdigit(*options)) { 3569b00dc3adSHugh Dickins options[-1] = '\0'; 3570b00dc3adSHugh Dickins break; 3571b00dc3adSHugh Dickins } 3572b00dc3adSHugh Dickins } 3573626c3920SAl Viro if (*this_char) { 3574626c3920SAl Viro char *value = strchr(this_char,'='); 3575f3235626SDavid Howells size_t len = 0; 3576626c3920SAl Viro int err; 3577626c3920SAl Viro 3578626c3920SAl Viro if (value) { 3579626c3920SAl Viro *value++ = '\0'; 3580f3235626SDavid Howells len = strlen(value); 35811da177e4SLinus Torvalds } 3582f3235626SDavid Howells err = vfs_parse_fs_string(fc, this_char, value, len); 3583f3235626SDavid Howells if (err < 0) 3584f3235626SDavid Howells return err; 35851da177e4SLinus Torvalds } 3586626c3920SAl Viro } 35871da177e4SLinus Torvalds return 0; 35881da177e4SLinus Torvalds } 35891da177e4SLinus Torvalds 3590f3235626SDavid Howells /* 3591f3235626SDavid Howells * Reconfigure a shmem filesystem. 3592f3235626SDavid Howells * 3593f3235626SDavid Howells * Note that we disallow change from limited->unlimited blocks/inodes while any 3594f3235626SDavid Howells * are in use; but we must separately disallow unlimited->limited, because in 3595f3235626SDavid Howells * that case we have no record of how much is already in use. 3596f3235626SDavid Howells */ 3597f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc) 35981da177e4SLinus Torvalds { 3599f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3600f3235626SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 36010edd73b3SHugh Dickins unsigned long inodes; 3602f3235626SDavid Howells const char *err; 36030edd73b3SHugh Dickins 36040edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 36050edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 3606f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 3607f3235626SDavid Howells if (!sbinfo->max_blocks) { 3608f3235626SDavid Howells err = "Cannot retroactively limit size"; 36090edd73b3SHugh Dickins goto out; 36100b5071ddSAl Viro } 3611f3235626SDavid Howells if (percpu_counter_compare(&sbinfo->used_blocks, 3612f3235626SDavid Howells ctx->blocks) > 0) { 3613f3235626SDavid Howells err = "Too small a size for current use"; 36140b5071ddSAl Viro goto out; 3615f3235626SDavid Howells } 3616f3235626SDavid Howells } 3617f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 3618f3235626SDavid Howells if (!sbinfo->max_inodes) { 3619f3235626SDavid Howells err = "Cannot retroactively limit inodes"; 36200b5071ddSAl Viro goto out; 36210b5071ddSAl Viro } 3622f3235626SDavid Howells if (ctx->inodes < inodes) { 3623f3235626SDavid Howells err = "Too few inodes for current use"; 3624f3235626SDavid Howells goto out; 3625f3235626SDavid Howells } 3626f3235626SDavid Howells } 36270edd73b3SHugh Dickins 3628ea3271f7SChris Down if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 3629ea3271f7SChris Down sbinfo->next_ino > UINT_MAX) { 3630ea3271f7SChris Down err = "Current inum too high to switch to 32-bit inums"; 3631ea3271f7SChris Down goto out; 3632ea3271f7SChris Down } 3633ea3271f7SChris Down 3634f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_HUGE) 3635f3235626SDavid Howells sbinfo->huge = ctx->huge; 3636ea3271f7SChris Down if (ctx->seen & SHMEM_SEEN_INUMS) 3637ea3271f7SChris Down 
sbinfo->full_inums = ctx->full_inums; 3638f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_BLOCKS) 3639f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3640f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_INODES) { 3641f3235626SDavid Howells sbinfo->max_inodes = ctx->inodes; 3642f3235626SDavid Howells sbinfo->free_inodes = ctx->inodes - inodes; 36430b5071ddSAl Viro } 364471fe804bSLee Schermerhorn 36455f00110fSGreg Thelen /* 36465f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 36475f00110fSGreg Thelen */ 3648f3235626SDavid Howells if (ctx->mpol) { 364971fe804bSLee Schermerhorn mpol_put(sbinfo->mpol); 3650f3235626SDavid Howells sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 3651f3235626SDavid Howells ctx->mpol = NULL; 36525f00110fSGreg Thelen } 3653f3235626SDavid Howells spin_unlock(&sbinfo->stat_lock); 3654f3235626SDavid Howells return 0; 36550edd73b3SHugh Dickins out: 36560edd73b3SHugh Dickins spin_unlock(&sbinfo->stat_lock); 3657f35aa2bcSAl Viro return invalfc(fc, "%s", err); 36581da177e4SLinus Torvalds } 3659680d794bSakpm@linux-foundation.org 366034c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3661680d794bSakpm@linux-foundation.org { 366234c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3663680d794bSakpm@linux-foundation.org 3664680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 3665680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 366609cbfeafSKirill A. Shutemov sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3667680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 3668680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 36690825a6f9SJoe Perches if (sbinfo->mode != (0777 | S_ISVTX)) 367009208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 36718751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 36728751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 36738751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 36748751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 36758751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 36768751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 3677ea3271f7SChris Down 3678ea3271f7SChris Down /* 3679ea3271f7SChris Down * Showing inode{64,32} might be useful even if it's the system default, 3680ea3271f7SChris Down * since then people don't have to resort to checking both here and 3681ea3271f7SChris Down * /proc/config.gz to confirm 64-bit inums were successfully applied 3682ea3271f7SChris Down * (which may not even exist if IKCONFIG_PROC isn't enabled). 3683ea3271f7SChris Down * 3684ea3271f7SChris Down * We hide it when inode64 isn't the default and we are using 32-bit 3685ea3271f7SChris Down * inodes, since that probably just means the feature isn't even under 3686ea3271f7SChris Down * consideration. 
3687ea3271f7SChris Down *
3688ea3271f7SChris Down * As such:
3689ea3271f7SChris Down *
3690ea3271f7SChris Down * +-----------------+-----------------+
3691ea3271f7SChris Down * | TMPFS_INODE64=y | TMPFS_INODE64=n |
3692ea3271f7SChris Down * +------------------+-----------------+-----------------+
3693ea3271f7SChris Down * | full_inums=true | show | show |
3694ea3271f7SChris Down * | full_inums=false | show | hide |
3695ea3271f7SChris Down * +------------------+-----------------+-----------------+
3696ea3271f7SChris Down *
3697ea3271f7SChris Down */
3698ea3271f7SChris Down if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3699ea3271f7SChris Down seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3700396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
37015a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
37025a6e75f8SKirill A. Shutemov if (sbinfo->huge)
37035a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
37045a6e75f8SKirill A. Shutemov #endif
370571fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol);
3706680d794bSakpm@linux-foundation.org return 0;
3707680d794bSakpm@linux-foundation.org }
37089183df25SDavid Herrmann
3709680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
37101da177e4SLinus Torvalds
37111da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
37121da177e4SLinus Torvalds {
3713602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3714602586a8SHugh Dickins
3715e809d5f0SChris Down free_percpu(sbinfo->ino_batch);
3716602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks);
371749cd0a5cSGreg Thelen mpol_put(sbinfo->mpol);
3718602586a8SHugh Dickins kfree(sbinfo);
37191da177e4SLinus Torvalds sb->s_fs_info = NULL;
37201da177e4SLinus Torvalds }
37211da177e4SLinus Torvalds
3722f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
37231da177e4SLinus Torvalds {
3724f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private;
37251da177e4SLinus Torvalds struct inode *inode;
37260edd73b3SHugh Dickins struct shmem_sb_info *sbinfo;
3727680d794bSakpm@linux-foundation.org int err = -ENOMEM;
3728680d794bSakpm@linux-foundation.org
3729680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */
3730425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3731680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL);
3732680d794bSakpm@linux-foundation.org if (!sbinfo)
3733680d794bSakpm@linux-foundation.org return -ENOMEM;
3734680d794bSakpm@linux-foundation.org
3735680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo;
37361da177e4SLinus Torvalds
37370edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
37381da177e4SLinus Torvalds /*
37391da177e4SLinus Torvalds * By default we only allow half of the physical ram per
37401da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem;
37411da177e4SLinus Torvalds * but the internal instance is left unlimited.
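 * (For example, a mount with no size= option on a machine with 8 GiB
 * of RAM therefore defaults to size=4G; the kernel-internal
 * SB_KERNMOUNT instance below is the one left uncapped.)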
37421da177e4SLinus Torvalds */ 37431751e8a6SLinus Torvalds if (!(sb->s_flags & SB_KERNMOUNT)) { 3744f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 3745f3235626SDavid Howells ctx->blocks = shmem_default_max_blocks(); 3746f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_INODES)) 3747f3235626SDavid Howells ctx->inodes = shmem_default_max_inodes(); 3748ea3271f7SChris Down if (!(ctx->seen & SHMEM_SEEN_INUMS)) 3749ea3271f7SChris Down ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 3750ca4e0519SAl Viro } else { 37511751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 37521da177e4SLinus Torvalds } 375391828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops; 37541751e8a6SLinus Torvalds sb->s_flags |= SB_NOSEC; 37550edd73b3SHugh Dickins #else 37561751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 37570edd73b3SHugh Dickins #endif 3758f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3759f3235626SDavid Howells sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 3760e809d5f0SChris Down if (sb->s_flags & SB_KERNMOUNT) { 3761e809d5f0SChris Down sbinfo->ino_batch = alloc_percpu(ino_t); 3762e809d5f0SChris Down if (!sbinfo->ino_batch) 3763e809d5f0SChris Down goto failed; 3764e809d5f0SChris Down } 3765f3235626SDavid Howells sbinfo->uid = ctx->uid; 3766f3235626SDavid Howells sbinfo->gid = ctx->gid; 3767ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums; 3768f3235626SDavid Howells sbinfo->mode = ctx->mode; 3769f3235626SDavid Howells sbinfo->huge = ctx->huge; 3770f3235626SDavid Howells sbinfo->mpol = ctx->mpol; 3771f3235626SDavid Howells ctx->mpol = NULL; 37721da177e4SLinus Torvalds 37731da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 3774908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3775602586a8SHugh Dickins goto failed; 3776779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 3777779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 37781da177e4SLinus Torvalds 3779285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 378009cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 378109cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 37821da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 37831da177e4SLinus Torvalds sb->s_op = &shmem_ops; 3784cfd95a9cSRobin H. 
Johnson sb->s_time_gran = 1; 3785b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 378639f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 3787b09e0fa4SEric Paris #endif 3788b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 37891751e8a6SLinus Torvalds sb->s_flags |= SB_POSIXACL; 379039f0247dSAndreas Gruenbacher #endif 37912b4db796SAmir Goldstein uuid_gen(&sb->s_uuid); 37920edd73b3SHugh Dickins 3793454abafeSDmitry Monakhov inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 37941da177e4SLinus Torvalds if (!inode) 37951da177e4SLinus Torvalds goto failed; 3796680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 3797680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 3798318ceed0SAl Viro sb->s_root = d_make_root(inode); 3799318ceed0SAl Viro if (!sb->s_root) 380048fde701SAl Viro goto failed; 38011da177e4SLinus Torvalds return 0; 38021da177e4SLinus Torvalds 38031da177e4SLinus Torvalds failed: 38041da177e4SLinus Torvalds shmem_put_super(sb); 38051da177e4SLinus Torvalds return err; 38061da177e4SLinus Torvalds } 38071da177e4SLinus Torvalds 3808f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc) 3809f3235626SDavid Howells { 3810f3235626SDavid Howells return get_tree_nodev(fc, shmem_fill_super); 3811f3235626SDavid Howells } 3812f3235626SDavid Howells 3813f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc) 3814f3235626SDavid Howells { 3815f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3816f3235626SDavid Howells 3817f3235626SDavid Howells if (ctx) { 3818f3235626SDavid Howells mpol_put(ctx->mpol); 3819f3235626SDavid Howells kfree(ctx); 3820f3235626SDavid Howells } 3821f3235626SDavid Howells } 3822f3235626SDavid Howells 3823f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = { 3824f3235626SDavid Howells .free = shmem_free_fc, 3825f3235626SDavid Howells .get_tree = shmem_get_tree, 3826f3235626SDavid Howells #ifdef CONFIG_TMPFS 3827f3235626SDavid Howells .parse_monolithic = shmem_parse_options, 3828f3235626SDavid Howells .parse_param = shmem_parse_one, 3829f3235626SDavid Howells .reconfigure = shmem_reconfigure, 3830f3235626SDavid Howells #endif 3831f3235626SDavid Howells }; 3832f3235626SDavid Howells 3833fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 38341da177e4SLinus Torvalds 38351da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 38361da177e4SLinus Torvalds { 383741ffe5d5SHugh Dickins struct shmem_inode_info *info; 383841ffe5d5SHugh Dickins info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 383941ffe5d5SHugh Dickins if (!info) 38401da177e4SLinus Torvalds return NULL; 384141ffe5d5SHugh Dickins return &info->vfs_inode; 38421da177e4SLinus Torvalds } 38431da177e4SLinus Torvalds 384474b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode) 3845fa0d7e3dSNick Piggin { 384684e710daSAl Viro if (S_ISLNK(inode->i_mode)) 38473ed47db3SAl Viro kfree(inode->i_link); 3848fa0d7e3dSNick Piggin kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3849fa0d7e3dSNick Piggin } 3850fa0d7e3dSNick Piggin 38511da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 38521da177e4SLinus Torvalds { 385309208d15SAl Viro if (S_ISREG(inode->i_mode)) 38541da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 38551da177e4SLinus Torvalds } 38561da177e4SLinus Torvalds 385741ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 38581da177e4SLinus Torvalds { 
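	/*
	 * Slab constructor: runs once when each cache object is first
	 * created (see shmem_init_inodecache() below), not on every
	 * allocation, so only state that stays valid across object
	 * reuse is initialized here.
	 */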
385941ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 386041ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 38611da177e4SLinus Torvalds } 38621da177e4SLinus Torvalds 38639a8ec03eSweiping zhang static void shmem_init_inodecache(void) 38641da177e4SLinus Torvalds { 38651da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 38661da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 38675d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 38681da177e4SLinus Torvalds } 38691da177e4SLinus Torvalds 387041ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 38711da177e4SLinus Torvalds { 38721a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 38731da177e4SLinus Torvalds } 38741da177e4SLinus Torvalds 387530e6a51dSHui Su const struct address_space_operations shmem_aops = { 38761da177e4SLinus Torvalds .writepage = shmem_writepage, 387776719325SKen Chen .set_page_dirty = __set_page_dirty_no_writeback, 38781da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3879800d15a5SNick Piggin .write_begin = shmem_write_begin, 3880800d15a5SNick Piggin .write_end = shmem_write_end, 38811da177e4SLinus Torvalds #endif 38821c93923cSAndrew Morton #ifdef CONFIG_MIGRATION 3883304dbdb7SLee Schermerhorn .migratepage = migrate_page, 38841c93923cSAndrew Morton #endif 3885aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 38861da177e4SLinus Torvalds }; 388730e6a51dSHui Su EXPORT_SYMBOL(shmem_aops); 38881da177e4SLinus Torvalds 388915ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 38901da177e4SLinus Torvalds .mmap = shmem_mmap, 3891c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area, 38921da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3893220f2ac9SHugh Dickins .llseek = shmem_file_llseek, 38942ba5bbedSAl Viro .read_iter = shmem_file_read_iter, 38958174202bSAl Viro .write_iter = generic_file_write_iter, 38961b061d92SChristoph Hellwig .fsync = noop_fsync, 389782c156f8SAl Viro .splice_read = generic_file_splice_read, 3898f6cb85d0SAl Viro .splice_write = iter_file_splice_write, 389983e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 39001da177e4SLinus Torvalds #endif 39011da177e4SLinus Torvalds }; 39021da177e4SLinus Torvalds 390392e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 390444a30220SYu Zhao .getattr = shmem_getattr, 390594c1e62dSHugh Dickins .setattr = shmem_setattr, 3906b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3907b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3908feda821eSChristoph Hellwig .set_acl = simple_set_acl, 3909b09e0fa4SEric Paris #endif 39101da177e4SLinus Torvalds }; 39111da177e4SLinus Torvalds 391292e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 39131da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 39141da177e4SLinus Torvalds .create = shmem_create, 39151da177e4SLinus Torvalds .lookup = simple_lookup, 39161da177e4SLinus Torvalds .link = shmem_link, 39171da177e4SLinus Torvalds .unlink = shmem_unlink, 39181da177e4SLinus Torvalds .symlink = shmem_symlink, 39191da177e4SLinus Torvalds .mkdir = shmem_mkdir, 39201da177e4SLinus Torvalds .rmdir = shmem_rmdir, 39211da177e4SLinus Torvalds .mknod = shmem_mknod, 39222773bf00SMiklos Szeredi .rename = shmem_rename2, 392360545d0dSAl Viro .tmpfile = shmem_tmpfile, 39241da177e4SLinus Torvalds #endif 3925b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3926b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3927b09e0fa4SEric Paris #endif 392839f0247dSAndreas 
Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 392994c1e62dSHugh Dickins .setattr = shmem_setattr, 3930feda821eSChristoph Hellwig .set_acl = simple_set_acl, 393139f0247dSAndreas Gruenbacher #endif 393239f0247dSAndreas Gruenbacher }; 393339f0247dSAndreas Gruenbacher 393492e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 3935b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3936b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3937b09e0fa4SEric Paris #endif 393839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 393994c1e62dSHugh Dickins .setattr = shmem_setattr, 3940feda821eSChristoph Hellwig .set_acl = simple_set_acl, 394139f0247dSAndreas Gruenbacher #endif 39421da177e4SLinus Torvalds }; 39431da177e4SLinus Torvalds 3944759b9775SHugh Dickins static const struct super_operations shmem_ops = { 39451da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 394674b1da56SAl Viro .free_inode = shmem_free_in_core_inode, 39471da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 39481da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 39491da177e4SLinus Torvalds .statfs = shmem_statfs, 3950680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 39511da177e4SLinus Torvalds #endif 39521f895f75SAl Viro .evict_inode = shmem_evict_inode, 39531da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 39541da177e4SLinus Torvalds .put_super = shmem_put_super, 3955396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3956779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count, 3957779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan, 3958779750d2SKirill A. Shutemov #endif 39591da177e4SLinus Torvalds }; 39601da177e4SLinus Torvalds 3961f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 396254cb8821SNick Piggin .fault = shmem_fault, 3963d7c17551SNing Qu .map_pages = filemap_map_pages, 39641da177e4SLinus Torvalds #ifdef CONFIG_NUMA 39651da177e4SLinus Torvalds .set_policy = shmem_set_policy, 39661da177e4SLinus Torvalds .get_policy = shmem_get_policy, 39671da177e4SLinus Torvalds #endif 39681da177e4SLinus Torvalds }; 39691da177e4SLinus Torvalds 3970f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc) 39711da177e4SLinus Torvalds { 3972f3235626SDavid Howells struct shmem_options *ctx; 3973f3235626SDavid Howells 3974f3235626SDavid Howells ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 3975f3235626SDavid Howells if (!ctx) 3976f3235626SDavid Howells return -ENOMEM; 3977f3235626SDavid Howells 3978f3235626SDavid Howells ctx->mode = 0777 | S_ISVTX; 3979f3235626SDavid Howells ctx->uid = current_fsuid(); 3980f3235626SDavid Howells ctx->gid = current_fsgid(); 3981f3235626SDavid Howells 3982f3235626SDavid Howells fc->fs_private = ctx; 3983f3235626SDavid Howells fc->ops = &shmem_fs_context_ops; 3984f3235626SDavid Howells return 0; 39851da177e4SLinus Torvalds } 39861da177e4SLinus Torvalds 398741ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 39881da177e4SLinus Torvalds .owner = THIS_MODULE, 39891da177e4SLinus Torvalds .name = "tmpfs", 3990f3235626SDavid Howells .init_fs_context = shmem_init_fs_context, 3991f3235626SDavid Howells #ifdef CONFIG_TMPFS 3992d7167b14SAl Viro .parameters = shmem_fs_parameters, 3993f3235626SDavid Howells #endif 39941da177e4SLinus Torvalds .kill_sb = kill_litter_super, 399501c70267SMatthew Wilcox (Oracle) .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT, 39961da177e4SLinus Torvalds }; 39971da177e4SLinus Torvalds 
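/*
 * Overview of the mount flow: "mount -t tmpfs" allocates a filesystem
 * context through shmem_init_fs_context() above, shmem_parse_options()
 * and shmem_parse_one() digest the option string, shmem_get_tree()
 * builds the superblock via shmem_fill_super(), and remount goes
 * through shmem_reconfigure().
 */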
399841ffe5d5SHugh Dickins int __init shmem_init(void) 39991da177e4SLinus Torvalds { 40001da177e4SLinus Torvalds int error; 40011da177e4SLinus Torvalds 40029a8ec03eSweiping zhang shmem_init_inodecache(); 40031da177e4SLinus Torvalds 400441ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 40051da177e4SLinus Torvalds if (error) { 40061170532bSJoe Perches pr_err("Could not register tmpfs\n"); 40071da177e4SLinus Torvalds goto out2; 40081da177e4SLinus Torvalds } 400995dc112aSGreg Kroah-Hartman 4010ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 40111da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 40121da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 40131170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 40141da177e4SLinus Torvalds goto out1; 40151da177e4SLinus Torvalds } 40165a6e75f8SKirill A. Shutemov 4017396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4018435c0b87SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 40195a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 40205a6e75f8SKirill A. Shutemov else 40215a6e75f8SKirill A. Shutemov shmem_huge = 0; /* just in case it was patched */ 40225a6e75f8SKirill A. Shutemov #endif 40231da177e4SLinus Torvalds return 0; 40241da177e4SLinus Torvalds 40251da177e4SLinus Torvalds out1: 402641ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 40271da177e4SLinus Torvalds out2: 402841ffe5d5SHugh Dickins shmem_destroy_inodecache(); 40291da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 40301da177e4SLinus Torvalds return error; 40311da177e4SLinus Torvalds } 4032853ac43aSMatt Mackall 4033396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 40345a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj, 40355a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 40365a6e75f8SKirill A. Shutemov { 403726083eb6SColin Ian King static const int values[] = { 40385a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS, 40395a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE, 40405a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE, 40415a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER, 40425a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY, 40435a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE, 40445a6e75f8SKirill A. Shutemov }; 404579d4d38aSJoe Perches int len = 0; 404679d4d38aSJoe Perches int i; 40475a6e75f8SKirill A. Shutemov 404879d4d38aSJoe Perches for (i = 0; i < ARRAY_SIZE(values); i++) { 404979d4d38aSJoe Perches len += sysfs_emit_at(buf, len, 405079d4d38aSJoe Perches shmem_huge == values[i] ? "%s[%s]" : "%s%s", 405179d4d38aSJoe Perches i ? " " : "", 40525a6e75f8SKirill A. Shutemov shmem_format_huge(values[i])); 40535a6e75f8SKirill A. Shutemov } 405479d4d38aSJoe Perches 405579d4d38aSJoe Perches len += sysfs_emit_at(buf, len, "\n"); 405679d4d38aSJoe Perches 405779d4d38aSJoe Perches return len; 40585a6e75f8SKirill A. Shutemov } 40595a6e75f8SKirill A. Shutemov 40605a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj, 40615a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 40625a6e75f8SKirill A. Shutemov { 40635a6e75f8SKirill A. Shutemov char tmp[16]; 40645a6e75f8SKirill A. Shutemov int huge; 40655a6e75f8SKirill A. Shutemov 40665a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp)) 40675a6e75f8SKirill A. Shutemov return -EINVAL; 40685a6e75f8SKirill A. Shutemov memcpy(tmp, buf, count); 40695a6e75f8SKirill A. 
Shutemov tmp[count] = '\0'; 40705a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n') 40715a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0'; 40725a6e75f8SKirill A. Shutemov 40735a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp); 40745a6e75f8SKirill A. Shutemov if (huge == -EINVAL) 40755a6e75f8SKirill A. Shutemov return -EINVAL; 40765a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 40775a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 40785a6e75f8SKirill A. Shutemov return -EINVAL; 40795a6e75f8SKirill A. Shutemov 40805a6e75f8SKirill A. Shutemov shmem_huge = huge; 4081435c0b87SKirill A. Shutemov if (shmem_huge > SHMEM_HUGE_DENY) 40825a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 40835a6e75f8SKirill A. Shutemov return count; 40845a6e75f8SKirill A. Shutemov } 40855a6e75f8SKirill A. Shutemov 40865a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr = 40875a6e75f8SKirill A. Shutemov __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 4088396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ 4089f3f0e1d2SKirill A. Shutemov 4090396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4091f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma) 4092f3f0e1d2SKirill A. Shutemov { 4093f3f0e1d2SKirill A. Shutemov struct inode *inode = file_inode(vma->vm_file); 4094f3f0e1d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 4095f3f0e1d2SKirill A. Shutemov loff_t i_size; 4096f3f0e1d2SKirill A. Shutemov pgoff_t off; 4097f3f0e1d2SKirill A. Shutemov 4098c0630669SYang Shi if ((vma->vm_flags & VM_NOHUGEPAGE) || 4099c0630669SYang Shi test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 4100c0630669SYang Shi return false; 4101f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 4102f3f0e1d2SKirill A. Shutemov return true; 4103f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY) 4104f3f0e1d2SKirill A. Shutemov return false; 4105f3f0e1d2SKirill A. Shutemov switch (sbinfo->huge) { 4106f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_NEVER: 4107f3f0e1d2SKirill A. Shutemov return false; 4108f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ALWAYS: 4109f3f0e1d2SKirill A. Shutemov return true; 4110f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 4111f3f0e1d2SKirill A. Shutemov off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 4112f3f0e1d2SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 4113f3f0e1d2SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 4114f3f0e1d2SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 4115f3f0e1d2SKirill A. Shutemov return true; 4116e4a9bc58SJoe Perches fallthrough; 4117f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 4118f3f0e1d2SKirill A. Shutemov /* TODO: implement fadvise() hints */ 4119f3f0e1d2SKirill A. Shutemov return (vma->vm_flags & VM_HUGEPAGE); 4120f3f0e1d2SKirill A. Shutemov default: 4121f3f0e1d2SKirill A. Shutemov VM_BUG_ON(1); 4122f3f0e1d2SKirill A. Shutemov return false; 4123f3f0e1d2SKirill A. Shutemov } 4124f3f0e1d2SKirill A. Shutemov } 4125396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 41265a6e75f8SKirill A. 
Shutemov
4127853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4128853ac43aSMatt Mackall
4129853ac43aSMatt Mackall /*
4130853ac43aSMatt Mackall * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4131853ac43aSMatt Mackall *
4132853ac43aSMatt Mackall * This is intended for small systems where the benefits of the full
4133853ac43aSMatt Mackall * shmem code (swap-backed and resource-limited) are outweighed by
4134853ac43aSMatt Mackall * their complexity. On systems without swap this code should be
4135853ac43aSMatt Mackall * effectively equivalent, but much lighter weight.
4136853ac43aSMatt Mackall */
4137853ac43aSMatt Mackall
413841ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4139853ac43aSMatt Mackall .name = "tmpfs",
4140f3235626SDavid Howells .init_fs_context = ramfs_init_fs_context,
4141d7167b14SAl Viro .parameters = ramfs_fs_parameters,
4142853ac43aSMatt Mackall .kill_sb = kill_litter_super,
41432b8576cbSEric W. Biederman .fs_flags = FS_USERNS_MOUNT,
4144853ac43aSMatt Mackall };
4145853ac43aSMatt Mackall
414641ffe5d5SHugh Dickins int __init shmem_init(void)
4147853ac43aSMatt Mackall {
414841ffe5d5SHugh Dickins BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4149853ac43aSMatt Mackall
415041ffe5d5SHugh Dickins shm_mnt = kern_mount(&shmem_fs_type);
4151853ac43aSMatt Mackall BUG_ON(IS_ERR(shm_mnt));
4152853ac43aSMatt Mackall
4153853ac43aSMatt Mackall return 0;
4154853ac43aSMatt Mackall }
4155853ac43aSMatt Mackall
4156b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4157b56a2d8aSVineeth Remanan Pillai unsigned long *fs_pages_to_unuse)
4158853ac43aSMatt Mackall {
4159853ac43aSMatt Mackall return 0;
4160853ac43aSMatt Mackall }
4161853ac43aSMatt Mackall
41623f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
41633f96b79aSHugh Dickins {
41643f96b79aSHugh Dickins return 0;
41653f96b79aSHugh Dickins }
41663f96b79aSHugh Dickins
416724513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
416724513264SHugh Dickins {
416924513264SHugh Dickins }
417024513264SHugh Dickins
4171c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4172c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4173c01d5b30SHugh Dickins unsigned long addr, unsigned long len,
4174c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags)
4175c01d5b30SHugh Dickins {
4176c01d5b30SHugh Dickins return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4177c01d5b30SHugh Dickins }
4178c01d5b30SHugh Dickins #endif
4179c01d5b30SHugh Dickins
418041ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
418194c1e62dSHugh Dickins {
418241ffe5d5SHugh Dickins truncate_inode_pages_range(inode->i_mapping, lstart, lend);
418394c1e62dSHugh Dickins }
418494c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
418594c1e62dSHugh Dickins
4186853ac43aSMatt Mackall #define shmem_vm_ops generic_file_vm_ops
41870b0a0806SHugh Dickins #define shmem_file_operations ramfs_file_operations
4188454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
41890b0a0806SHugh Dickins #define shmem_acct_size(flags, size) 0
41900b0a0806SHugh Dickins #define shmem_unacct_size(flags, size) do {} while (0)
4191853ac43aSMatt Mackall
4192853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4193853ac43aSMatt Mackall
4194853ac43aSMatt Mackall /* common code */
41951da177e4SLinus Torvalds
4196703321b6SMatthew Auld static struct file
*__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, 4197c7277090SEric Paris unsigned long flags, unsigned int i_flags) 41981da177e4SLinus Torvalds { 41991da177e4SLinus Torvalds struct inode *inode; 420093dec2daSAl Viro struct file *res; 42011da177e4SLinus Torvalds 4202703321b6SMatthew Auld if (IS_ERR(mnt)) 4203703321b6SMatthew Auld return ERR_CAST(mnt); 42041da177e4SLinus Torvalds 4205285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE) 42061da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 42071da177e4SLinus Torvalds 42081da177e4SLinus Torvalds if (shmem_acct_size(flags, size)) 42091da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 42101da177e4SLinus Torvalds 421193dec2daSAl Viro inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, 421293dec2daSAl Viro flags); 4213dac2d1f6SAl Viro if (unlikely(!inode)) { 4214dac2d1f6SAl Viro shmem_unacct_size(flags, size); 4215dac2d1f6SAl Viro return ERR_PTR(-ENOSPC); 4216dac2d1f6SAl Viro } 4217c7277090SEric Paris inode->i_flags |= i_flags; 42181da177e4SLinus Torvalds inode->i_size = size; 42196d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */ 422026567cdbSAl Viro res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 422193dec2daSAl Viro if (!IS_ERR(res)) 422293dec2daSAl Viro res = alloc_file_pseudo(inode, mnt, name, O_RDWR, 42234b42af81SAl Viro &shmem_file_operations); 42246b4d0b27SAl Viro if (IS_ERR(res)) 422593dec2daSAl Viro iput(inode); 42266b4d0b27SAl Viro return res; 42271da177e4SLinus Torvalds } 4228c7277090SEric Paris 4229c7277090SEric Paris /** 4230c7277090SEric Paris * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 4231c7277090SEric Paris * kernel internal. There will be NO LSM permission checks against the 4232c7277090SEric Paris * underlying inode. So users of this interface must do LSM checks at a 4233e1832f29SStephen Smalley * higher layer. The users are the big_key and shm implementations. LSM 4234e1832f29SStephen Smalley * checks are provided at the key or shm level rather than the inode. 
4235c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps)
4236c7277090SEric Paris * @size: size to be set for the file
4237c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4238c7277090SEric Paris */
4239c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4240c7277090SEric Paris {
4241703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4242c7277090SEric Paris }
4243c7277090SEric Paris
4244c7277090SEric Paris /**
4245c7277090SEric Paris * shmem_file_setup - get an unlinked file living in tmpfs
4246c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps)
4247c7277090SEric Paris * @size: size to be set for the file
4248c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4249c7277090SEric Paris */
4250c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4251c7277090SEric Paris {
4252703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4253c7277090SEric Paris }
4254395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
42551da177e4SLinus Torvalds
425646711810SRandy Dunlap /**
4257703321b6SMatthew Auld * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4258703321b6SMatthew Auld * @mnt: the tmpfs mount where the file will be created
4259703321b6SMatthew Auld * @name: name for dentry (to be seen in /proc/<pid>/maps)
4260703321b6SMatthew Auld * @size: size to be set for the file
4261703321b6SMatthew Auld * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4262703321b6SMatthew Auld */
4263703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4264703321b6SMatthew Auld loff_t size, unsigned long flags)
4265703321b6SMatthew Auld {
4266703321b6SMatthew Auld return __shmem_file_setup(mnt, name, size, flags, 0);
4267703321b6SMatthew Auld }
4268703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4269703321b6SMatthew Auld
4270703321b6SMatthew Auld /**
42711da177e4SLinus Torvalds * shmem_zero_setup - setup a shared anonymous mapping
427245e55300SPeter Collingbourne * @vma: the vma to be mmapped is prepared by do_mmap
42731da177e4SLinus Torvalds */
42741da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
42751da177e4SLinus Torvalds {
42761da177e4SLinus Torvalds struct file *file;
42771da177e4SLinus Torvalds loff_t size = vma->vm_end - vma->vm_start;
42781da177e4SLinus Torvalds
427966fc1303SHugh Dickins /*
4280c1e8d7c6SMichel Lespinasse * Cloning a new file under mmap_lock leads to a lock ordering conflict
428166fc1303SHugh Dickins * between XFS directory reading and selinux: since this file is only
428266fc1303SHugh Dickins * accessible to the user through its mapping, use S_PRIVATE flag to
428366fc1303SHugh Dickins * bypass file security, in the same way as shmem_kernel_file_setup().
428466fc1303SHugh Dickins */
4285703321b6SMatthew Auld file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
42861da177e4SLinus Torvalds if (IS_ERR(file))
42871da177e4SLinus Torvalds return PTR_ERR(file);
42881da177e4SLinus Torvalds
42891da177e4SLinus Torvalds if (vma->vm_file)
42901da177e4SLinus Torvalds fput(vma->vm_file);
42911da177e4SLinus Torvalds vma->vm_file = file;
42921da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops;
4293f3f0e1d2SKirill A.
Shutemov 4294396bcc52SMatthew Wilcox (Oracle) if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 4295f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 4296f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 4297f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 4298f3f0e1d2SKirill A. Shutemov } 4299f3f0e1d2SKirill A. Shutemov 43001da177e4SLinus Torvalds return 0; 43011da177e4SLinus Torvalds } 4302d9d90e5eSHugh Dickins 4303d9d90e5eSHugh Dickins /** 4304d9d90e5eSHugh Dickins * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 4305d9d90e5eSHugh Dickins * @mapping: the page's address_space 4306d9d90e5eSHugh Dickins * @index: the page index 4307d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating 4308d9d90e5eSHugh Dickins * 4309d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 4310d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags. 4311d9d90e5eSHugh Dickins * But read_cache_page_gfp() uses the ->readpage() method: which does not 4312d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those 4313d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 4314d9d90e5eSHugh Dickins * 431568da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 431668da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 4317d9d90e5eSHugh Dickins */ 4318d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 4319d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp) 4320d9d90e5eSHugh Dickins { 432168da9f05SHugh Dickins #ifdef CONFIG_SHMEM 432268da9f05SHugh Dickins struct inode *inode = mapping->host; 43239276aad6SHugh Dickins struct page *page; 432468da9f05SHugh Dickins int error; 432568da9f05SHugh Dickins 432630e6a51dSHui Su BUG_ON(!shmem_mapping(mapping)); 43279e18eb29SAndres Lagar-Cavilla error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, 4328cfda0526SMike Rapoport gfp, NULL, NULL, NULL); 432968da9f05SHugh Dickins if (error) 433068da9f05SHugh Dickins page = ERR_PTR(error); 433168da9f05SHugh Dickins else 433268da9f05SHugh Dickins unlock_page(page); 433368da9f05SHugh Dickins return page; 433468da9f05SHugh Dickins #else 433568da9f05SHugh Dickins /* 433668da9f05SHugh Dickins * The tiny !SHMEM case uses ramfs without swap 433768da9f05SHugh Dickins */ 4338d9d90e5eSHugh Dickins return read_cache_page_gfp(mapping, index, gfp); 433968da9f05SHugh Dickins #endif 4340d9d90e5eSHugh Dickins } 4341d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 4342
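/*
 * Usage sketch (illustrative only): how a kernel-internal caller might
 * combine the interfaces above. example_shmem_user() is a hypothetical
 * name, not part of this file.
 */
static int __maybe_unused example_shmem_user(void)
{
	struct file *file;
	struct page *page;

	/* 1 MiB unlinked tmpfs file; the name only shows in /proc/<pid>/maps */
	file = shmem_file_setup("example", 1024 * 1024, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* Read page 0, allocating with GFP_KERNEL if nothing is cached yet */
	page = shmem_read_mapping_page_gfp(file_inode(file)->i_mapping,
					   0, GFP_KERNEL);
	if (IS_ERR(page)) {
		fput(file);
		return PTR_ERR(page);
	}

	/* ... access the page contents, e.g. via kmap_local_page() ... */

	put_page(page);		/* drop the reference the lookup took */
	fput(file);		/* last reference frees the unlinked inode */
	return 0;
}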