/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
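
/*
 * For example, with 4KiB pages: BLOCKS_PER_PAGE is 8, since i_blocks is
 * kept in 512-byte sectors, and VM_ACCT(10000) is 3, since 10000 bytes
 * round up to three whole pages for commit accounting.
 */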

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}
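
/*
 * Worked example, assuming 4KiB pages: a 12KiB object created without
 * VM_NORESERVE commits all of VM_ACCT(12288) = 3 pages up front in
 * shmem_acct_size(), and shmem_acct_block() then charges nothing per
 * page.  The same object with VM_NORESERVE commits nothing up front,
 * and each page is instead charged here as it is actually allocated.
 */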

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
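		/*
		 * For example: the first allocation on a CPU finds its
		 * per-cpu cursor at a multiple of SHMEM_INO_BATCH, takes
		 * stat_lock once, and claims the next batch of 1024 inode
		 * numbers; the following 1023 allocations on that CPU only
		 * bump the per-cpu cursor, with no lock traffic at all.
		 */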
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
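
/*
 * Worked example: with alloced == 100, swapped == 20 and nrpages == 70,
 * freed is 10 -- ten undirtied hole pages were dropped by the mm behind
 * our back, so ten pages' worth of i_blocks and block accounting are
 * given back here.
 */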
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
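
/*
 * A swapped-out page is represented in the page cache by an xarray value
 * entry rather than a page pointer: swp_to_radix_entry() packs the
 * swp_entry_t into such an entry, radix_to_swp_entry() unpacks it, and
 * xa_is_value() is how lookups throughout this file tell the two apart.
 */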
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */
#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
		    i_size >> PAGE_SHIFT >= off)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		/* TODO: implement fadvise() hints */
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		VM_BUG_ON(1);
		return false;
	}
}
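
/*
 * Worked example of the SHMEM_HUGE_WITHIN_SIZE test, assuming 4KiB pages
 * and 2MiB PMD huge pages (HPAGE_PMD_NR == 512): for a 3MiB file mapped
 * at vm_pgoff 0, off is round_up(0, 512) == 0 and i_size rounds to 3MiB,
 * so i_size >= 2MiB and (i_size >> PAGE_SHIFT) == 768 >= 0, and huge
 * pages are used.  For a 1MiB file the first test already fails, so the
 * check falls through to the SHMEM_HUGE_ADVISE (madvise) case instead.
 */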

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif
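
/*
 * Scan the shrinklist for inodes whose last huge page sticks out beyond
 * i_size: splitting such a page lets the tail pages past EOF be freed.
 * Inodes whose size now ends exactly on a page or huge-page boundary are
 * simply dropped from the list; at most nr_to_split pages are split per
 * call (0 means no limit).
 */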
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}
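
/*
 * shmem_unused_huge_count() and shmem_unused_huge_scan() above are wired
 * up as the superblock's ->nr_cached_objects and ->free_cached_objects
 * callbacks (see shmem_ops later in this file), so the generic VFS
 * shrinker drives the huge page splitting under memory pressure.
 */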
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}
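
/*
 * Note the bookkeeping above for a huge page: nr == HPAGE_PMD_NR (512
 * with 4KiB pages and 2MiB huge pages), so the page's refcount is raised
 * by nr, and the store loop walks nr consecutive slots, pointing each at
 * the head page.  For a small page, nr == 1 and a single slot is stored.
 */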
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
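
/*
 * Note that @end is exclusive while xas_for_each() takes an inclusive
 * maximum, hence the "end - 1" above; only value entries (swap entries)
 * are counted, and present pages are skipped.
 */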

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, &index))
			break;
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			if (!unfalloc || !PageUptodate(page))
				truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
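
	/*
	 * lstart and lend need not be page-aligned: a page that is only
	 * partially inside the range must survive, so its in-range part
	 * is zeroed here rather than truncated.  When start > end, both
	 * partial offsets fall within the same single page.
	 */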
	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;
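
	/*
	 * Second pass: unlike the trylock-based loop above, this one uses
	 * find_get_entries() and sleeps in lock_page(), and it keeps
	 * rescanning until the whole range is really gone.  When a swap
	 * entry or page has been swizzled under us, "index--; break;"
	 * makes the pagevec loop retry from that index.
	 */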
	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
					/* Wipe the page and don't get stuck */
					clear_highpage(page);
					flush_dcache_page(page);
					set_page_dirty(page);
					if (index <
					    round_up(start, HPAGE_PMD_NR))
						start = index + 1;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
Shutemov spin_unlock_irq(&info->lock); 1082d0424c42SHugh Dickins } 10830d56a451SChristian Brauner generic_fillattr(&init_user_ns, inode, stat); 108489fdcd26SYang Shi 108589fdcd26SYang Shi if (is_huge_enabled(sb_info)) 108689fdcd26SYang Shi stat->blksize = HPAGE_PMD_SIZE; 108789fdcd26SYang Shi 108844a30220SYu Zhao return 0; 108944a30220SYu Zhao } 109044a30220SYu Zhao 1091549c7297SChristian Brauner static int shmem_setattr(struct user_namespace *mnt_userns, 1092549c7297SChristian Brauner struct dentry *dentry, struct iattr *attr) 10931da177e4SLinus Torvalds { 109475c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 109540e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 10961da177e4SLinus Torvalds int error; 10971da177e4SLinus Torvalds 10982f221d6fSChristian Brauner error = setattr_prepare(&init_user_ns, dentry, attr); 1099db78b877SChristoph Hellwig if (error) 1100db78b877SChristoph Hellwig return error; 1101db78b877SChristoph Hellwig 110294c1e62dSHugh Dickins if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 110394c1e62dSHugh Dickins loff_t oldsize = inode->i_size; 110494c1e62dSHugh Dickins loff_t newsize = attr->ia_size; 11053889e6e7Snpiggin@suse.de 110640e041a2SDavid Herrmann /* protected by i_mutex */ 110740e041a2SDavid Herrmann if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 110840e041a2SDavid Herrmann (newsize > oldsize && (info->seals & F_SEAL_GROW))) 110940e041a2SDavid Herrmann return -EPERM; 111040e041a2SDavid Herrmann 111194c1e62dSHugh Dickins if (newsize != oldsize) { 111277142517SKonstantin Khlebnikov error = shmem_reacct_size(SHMEM_I(inode)->flags, 111377142517SKonstantin Khlebnikov oldsize, newsize); 111477142517SKonstantin Khlebnikov if (error) 111577142517SKonstantin Khlebnikov return error; 111694c1e62dSHugh Dickins i_size_write(inode, newsize); 1117078cd827SDeepa Dinamani inode->i_ctime = inode->i_mtime = current_time(inode); 111894c1e62dSHugh Dickins } 1119afa2db2fSJosef Bacik if (newsize <= oldsize) { 112094c1e62dSHugh Dickins loff_t holebegin = round_up(newsize, PAGE_SIZE); 1121d0424c42SHugh Dickins if (oldsize > holebegin) 1122d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1123d0424c42SHugh Dickins holebegin, 0, 1); 1124d0424c42SHugh Dickins if (info->alloced) 1125d0424c42SHugh Dickins shmem_truncate_range(inode, 1126d0424c42SHugh Dickins newsize, (loff_t)-1); 112794c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */ 1128d0424c42SHugh Dickins if (oldsize > holebegin) 1129d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1130d0424c42SHugh Dickins holebegin, 0, 1); 113194c1e62dSHugh Dickins } 11321da177e4SLinus Torvalds } 11331da177e4SLinus Torvalds 11342f221d6fSChristian Brauner setattr_copy(&init_user_ns, inode, attr); 1135db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE) 1136e65ce2a5SChristian Brauner error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); 11371da177e4SLinus Torvalds return error; 11381da177e4SLinus Torvalds } 11391da177e4SLinus Torvalds 11401f895f75SAl Viro static void shmem_evict_inode(struct inode *inode) 11411da177e4SLinus Torvalds { 11421da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 1143779750d2SKirill A. 
Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 11441da177e4SLinus Torvalds 114530e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) { 11461da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size); 11471da177e4SLinus Torvalds inode->i_size = 0; 11483889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1); 1149779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1150779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1151779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1152779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 1153779750d2SKirill A. Shutemov sbinfo->shrinklist_len--; 1154779750d2SKirill A. Shutemov } 1155779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1156779750d2SKirill A. Shutemov } 1157af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) { 1158af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... */ 1159af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction, 1160af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction)); 1161cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1162af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */ 1163af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction)) 11641da177e4SLinus Torvalds list_del_init(&info->swaplist); 1165cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 11661da177e4SLinus Torvalds } 11673ed47db3SAl Viro } 1168b09e0fa4SEric Paris 116938f38657SAristeu Rozanski simple_xattrs_free(&info->xattrs); 11700f3c42f5SHugh Dickins WARN_ON(inode->i_blocks); 11715b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 1172dbd5768fSJan Kara clear_inode(inode); 11731da177e4SLinus Torvalds } 11741da177e4SLinus Torvalds 1175b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping, 1176b56a2d8aSVineeth Remanan Pillai pgoff_t start, unsigned int nr_entries, 1177b56a2d8aSVineeth Remanan Pillai struct page **entries, pgoff_t *indices, 117887039546SHugh Dickins unsigned int type, bool frontswap) 1179478922e2SMatthew Wilcox { 1180b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start); 1181b56a2d8aSVineeth Remanan Pillai struct page *page; 118287039546SHugh Dickins swp_entry_t entry; 1183b56a2d8aSVineeth Remanan Pillai unsigned int ret = 0; 1184b56a2d8aSVineeth Remanan Pillai 1185b56a2d8aSVineeth Remanan Pillai if (!nr_entries) 1186b56a2d8aSVineeth Remanan Pillai return 0; 1187478922e2SMatthew Wilcox 1188478922e2SMatthew Wilcox rcu_read_lock(); 1189b56a2d8aSVineeth Remanan Pillai xas_for_each(&xas, page, ULONG_MAX) { 1190b56a2d8aSVineeth Remanan Pillai if (xas_retry(&xas, page)) 11915b9c98f3SMike Kravetz continue; 1192b56a2d8aSVineeth Remanan Pillai 1193b56a2d8aSVineeth Remanan Pillai if (!xa_is_value(page)) 1194478922e2SMatthew Wilcox continue; 1195b56a2d8aSVineeth Remanan Pillai 119687039546SHugh Dickins entry = radix_to_swp_entry(page); 119787039546SHugh Dickins if (swp_type(entry) != type) 1198b56a2d8aSVineeth Remanan Pillai continue; 119987039546SHugh Dickins if (frontswap && 120087039546SHugh Dickins !frontswap_test(swap_info[type], swp_offset(entry))) 120187039546SHugh Dickins continue; 1202b56a2d8aSVineeth Remanan Pillai 1203b56a2d8aSVineeth Remanan Pillai indices[ret] = xas.xa_index; 1204b56a2d8aSVineeth Remanan Pillai entries[ret] = page; 1205b56a2d8aSVineeth Remanan Pillai 1206b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1207e21a2955SMatthew Wilcox xas_pause(&xas); 1208478922e2SMatthew Wilcox 
cond_resched_rcu(); 1209478922e2SMatthew Wilcox } 1210b56a2d8aSVineeth Remanan Pillai if (++ret == nr_entries) 1211b56a2d8aSVineeth Remanan Pillai break; 1212b56a2d8aSVineeth Remanan Pillai } 1213478922e2SMatthew Wilcox rcu_read_unlock(); 1214e21a2955SMatthew Wilcox 1215b56a2d8aSVineeth Remanan Pillai return ret; 1216b56a2d8aSVineeth Remanan Pillai } 1217b56a2d8aSVineeth Remanan Pillai 1218b56a2d8aSVineeth Remanan Pillai /* 1219b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1220b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 1221b56a2d8aSVineeth Remanan Pillai */ 1222b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1223b56a2d8aSVineeth Remanan Pillai pgoff_t *indices) 1224b56a2d8aSVineeth Remanan Pillai { 1225b56a2d8aSVineeth Remanan Pillai int i = 0; 1226b56a2d8aSVineeth Remanan Pillai int ret = 0; 1227b56a2d8aSVineeth Remanan Pillai int error = 0; 1228b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1229b56a2d8aSVineeth Remanan Pillai 1230b56a2d8aSVineeth Remanan Pillai for (i = 0; i < pvec.nr; i++) { 1231b56a2d8aSVineeth Remanan Pillai struct page *page = pvec.pages[i]; 1232b56a2d8aSVineeth Remanan Pillai 1233b56a2d8aSVineeth Remanan Pillai if (!xa_is_value(page)) 1234b56a2d8aSVineeth Remanan Pillai continue; 1235b56a2d8aSVineeth Remanan Pillai error = shmem_swapin_page(inode, indices[i], 1236b56a2d8aSVineeth Remanan Pillai &page, SGP_CACHE, 1237b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1238b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1239b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1240b56a2d8aSVineeth Remanan Pillai unlock_page(page); 1241b56a2d8aSVineeth Remanan Pillai put_page(page); 1242b56a2d8aSVineeth Remanan Pillai ret++; 1243b56a2d8aSVineeth Remanan Pillai } 1244b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1245b56a2d8aSVineeth Remanan Pillai break; 1246b56a2d8aSVineeth Remanan Pillai error = 0; 1247b56a2d8aSVineeth Remanan Pillai } 1248b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1249478922e2SMatthew Wilcox } 1250478922e2SMatthew Wilcox 125146f65ec1SHugh Dickins /* 125246f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 
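 *
 * In outline (matching the loop below): gather up to a pagevec's worth of
 * swap entries for this mapping via shmem_find_swap_entries(), bring each
 * page back in via shmem_unuse_swap_entries(), then continue the scan from
 * the last index seen; a frontswap quota, when set, ends the scan early.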
125346f65ec1SHugh Dickins */ 1254b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type, 1255b56a2d8aSVineeth Remanan Pillai bool frontswap, unsigned long *fs_pages_to_unuse) 12561da177e4SLinus Torvalds { 1257b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1258b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0; 1259b56a2d8aSVineeth Remanan Pillai struct pagevec pvec; 1260b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE]; 1261b56a2d8aSVineeth Remanan Pillai bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0); 1262b56a2d8aSVineeth Remanan Pillai int ret = 0; 12631da177e4SLinus Torvalds 1264b56a2d8aSVineeth Remanan Pillai pagevec_init(&pvec); 1265b56a2d8aSVineeth Remanan Pillai do { 1266b56a2d8aSVineeth Remanan Pillai unsigned int nr_entries = PAGEVEC_SIZE; 12672e0e26c7SHugh Dickins 1268b56a2d8aSVineeth Remanan Pillai if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE) 1269b56a2d8aSVineeth Remanan Pillai nr_entries = *fs_pages_to_unuse; 12702e0e26c7SHugh Dickins 1271b56a2d8aSVineeth Remanan Pillai pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, 1272b56a2d8aSVineeth Remanan Pillai pvec.pages, indices, 127387039546SHugh Dickins type, frontswap); 1274b56a2d8aSVineeth Remanan Pillai if (pvec.nr == 0) { 1275b56a2d8aSVineeth Remanan Pillai ret = 0; 1276778dd893SHugh Dickins break; 1277b56a2d8aSVineeth Remanan Pillai } 1278b56a2d8aSVineeth Remanan Pillai 1279b56a2d8aSVineeth Remanan Pillai ret = shmem_unuse_swap_entries(inode, pvec, indices); 1280b56a2d8aSVineeth Remanan Pillai if (ret < 0) 1281b56a2d8aSVineeth Remanan Pillai break; 1282b56a2d8aSVineeth Remanan Pillai 1283b56a2d8aSVineeth Remanan Pillai if (frontswap_partial) { 1284b56a2d8aSVineeth Remanan Pillai *fs_pages_to_unuse -= ret; 1285b56a2d8aSVineeth Remanan Pillai if (*fs_pages_to_unuse == 0) { 1286b56a2d8aSVineeth Remanan Pillai ret = FRONTSWAP_PAGES_UNUSED; 1287b56a2d8aSVineeth Remanan Pillai break; 1288b56a2d8aSVineeth Remanan Pillai } 1289b56a2d8aSVineeth Remanan Pillai } 1290b56a2d8aSVineeth Remanan Pillai 1291b56a2d8aSVineeth Remanan Pillai start = indices[pvec.nr - 1]; 1292b56a2d8aSVineeth Remanan Pillai } while (true); 1293b56a2d8aSVineeth Remanan Pillai 1294b56a2d8aSVineeth Remanan Pillai return ret; 1295b56a2d8aSVineeth Remanan Pillai } 1296b56a2d8aSVineeth Remanan Pillai 1297b56a2d8aSVineeth Remanan Pillai /* 1298b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap 1299b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be 1300b56a2d8aSVineeth Remanan Pillai * unused. 
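 *
 * This is the shmem side of swapoff: walk shmem_swaplist and call
 * shmem_unuse_inode() on every inode with swapped pages. The per-inode
 * stop_eviction count keeps each inode alive while shmem_swaplist_mutex
 * is dropped around the scan (see the comments in the loop below).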
1301b56a2d8aSVineeth Remanan Pillai */ 1302b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap, 1303b56a2d8aSVineeth Remanan Pillai unsigned long *fs_pages_to_unuse) 1304b56a2d8aSVineeth Remanan Pillai { 1305b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1306b56a2d8aSVineeth Remanan Pillai int error = 0; 1307b56a2d8aSVineeth Remanan Pillai 1308b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1309b56a2d8aSVineeth Remanan Pillai return 0; 1310b56a2d8aSVineeth Remanan Pillai 1311b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1312b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1313b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1314b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1315b56a2d8aSVineeth Remanan Pillai continue; 1316b56a2d8aSVineeth Remanan Pillai } 1317af53d3e9SHugh Dickins /* 1318af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1319af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1320af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1321af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1322af53d3e9SHugh Dickins */ 1323af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1324b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1325b56a2d8aSVineeth Remanan Pillai 1326af53d3e9SHugh Dickins error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, 1327b56a2d8aSVineeth Remanan Pillai fs_pages_to_unuse); 1328b56a2d8aSVineeth Remanan Pillai cond_resched(); 1329b56a2d8aSVineeth Remanan Pillai 1330b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1331b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1332b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1333b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1334af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1335af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1336b56a2d8aSVineeth Remanan Pillai if (error) 1337b56a2d8aSVineeth Remanan Pillai break; 13381da177e4SLinus Torvalds } 1339cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1340778dd893SHugh Dickins 1341778dd893SHugh Dickins return error; 13421da177e4SLinus Torvalds } 13431da177e4SLinus Torvalds 13441da177e4SLinus Torvalds /* 13451da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 13461da177e4SLinus Torvalds */ 13471da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 13481da177e4SLinus Torvalds { 13491da177e4SLinus Torvalds struct shmem_inode_info *info; 13501da177e4SLinus Torvalds struct address_space *mapping; 13511da177e4SLinus Torvalds struct inode *inode; 13526922c0c7SHugh Dickins swp_entry_t swap; 13536922c0c7SHugh Dickins pgoff_t index; 13541da177e4SLinus Torvalds 1355800d8c63SKirill A. 
Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 13561da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 13571da177e4SLinus Torvalds mapping = page->mapping; 13581da177e4SLinus Torvalds index = page->index; 13591da177e4SLinus Torvalds inode = mapping->host; 13601da177e4SLinus Torvalds info = SHMEM_I(inode); 13611da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 13621da177e4SLinus Torvalds goto redirty; 1363d9fe526aSHugh Dickins if (!total_swap_pages) 13641da177e4SLinus Torvalds goto redirty; 13651da177e4SLinus Torvalds 1366d9fe526aSHugh Dickins /* 136797b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 136897b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 136997b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 137097b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 137197b713baSChristoph Hellwig * threads or sync. 1372d9fe526aSHugh Dickins */ 137348f170fbSHugh Dickins if (!wbc->for_reclaim) { 137448f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 137548f170fbSHugh Dickins goto redirty; 137648f170fbSHugh Dickins } 13771635f6a7SHugh Dickins 13781635f6a7SHugh Dickins /* 13791635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13801635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 13811635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 13821aac1400SHugh Dickins * 13831aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 13841aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 13851aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 13861aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 13871aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 13881635f6a7SHugh Dickins */ 13891635f6a7SHugh Dickins if (!PageUptodate(page)) { 13901aac1400SHugh Dickins if (inode->i_private) { 13911aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 13921aac1400SHugh Dickins spin_lock(&inode->i_lock); 13931aac1400SHugh Dickins shmem_falloc = inode->i_private; 13941aac1400SHugh Dickins if (shmem_falloc && 13958e205f77SHugh Dickins !shmem_falloc->waitq && 13961aac1400SHugh Dickins index >= shmem_falloc->start && 13971aac1400SHugh Dickins index < shmem_falloc->next) 13981aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 13991aac1400SHugh Dickins else 14001aac1400SHugh Dickins shmem_falloc = NULL; 14011aac1400SHugh Dickins spin_unlock(&inode->i_lock); 14021aac1400SHugh Dickins if (shmem_falloc) 14031aac1400SHugh Dickins goto redirty; 14041aac1400SHugh Dickins } 14051635f6a7SHugh Dickins clear_highpage(page); 14061635f6a7SHugh Dickins flush_dcache_page(page); 14071635f6a7SHugh Dickins SetPageUptodate(page); 14081635f6a7SHugh Dickins } 14091635f6a7SHugh Dickins 141038d8b4e6SHuang Ying swap = get_swap_page(page); 141148f170fbSHugh Dickins if (!swap.val) 141248f170fbSHugh Dickins goto redirty; 1413d9fe526aSHugh Dickins 1414b1dea800SHugh Dickins /* 1415b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 14166922c0c7SHugh Dickins * if it's not already there. 
Do it now before the page is 14176922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1418b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 14196922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 14206922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1421b1dea800SHugh Dickins */ 1422b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 142305bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1424b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1425b1dea800SHugh Dickins 14264afab1cdSYang Shi if (add_to_swap_cache(page, swap, 14273852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 14283852f676SJoonsoo Kim NULL) == 0) { 14294595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1430267a4c76SHugh Dickins shmem_recalc_inode(inode); 1431267a4c76SHugh Dickins info->swapped++; 14324595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1433267a4c76SHugh Dickins 1434aaa46865SHugh Dickins swap_shmem_alloc(swap); 14356922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 14366922c0c7SHugh Dickins 14376922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1438d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 14399fab5619SHugh Dickins swap_writepage(page, wbc); 14401da177e4SLinus Torvalds return 0; 14411da177e4SLinus Torvalds } 14421da177e4SLinus Torvalds 14436922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 144475f6d6d2SMinchan Kim put_swap_page(page, swap); 14451da177e4SLinus Torvalds redirty: 14461da177e4SLinus Torvalds set_page_dirty(page); 1447d9fe526aSHugh Dickins if (wbc->for_reclaim) 1448d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1449d9fe526aSHugh Dickins unlock_page(page); 1450d9fe526aSHugh Dickins return 0; 14511da177e4SLinus Torvalds } 14521da177e4SLinus Torvalds 145375edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 145471fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1455680d794bSakpm@linux-foundation.org { 1456680d794bSakpm@linux-foundation.org char buffer[64]; 1457680d794bSakpm@linux-foundation.org 145871fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1459095f1fc4SLee Schermerhorn return; /* show nothing */ 1460095f1fc4SLee Schermerhorn 1461a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1462095f1fc4SLee Schermerhorn 1463095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1464680d794bSakpm@linux-foundation.org } 146571fe804bSLee Schermerhorn 146671fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 146771fe804bSLee Schermerhorn { 146871fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 146971fe804bSLee Schermerhorn if (sbinfo->mpol) { 1470bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 147171fe804bSLee Schermerhorn mpol = sbinfo->mpol; 147271fe804bSLee Schermerhorn mpol_get(mpol); 1473bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 147471fe804bSLee Schermerhorn } 147571fe804bSLee Schermerhorn return mpol; 147671fe804bSLee Schermerhorn } 147775edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 147875edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 147975edd345SHugh Dickins { 148075edd345SHugh Dickins } 148175edd345SHugh Dickins static 
inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 148275edd345SHugh Dickins { 148375edd345SHugh Dickins return NULL; 148475edd345SHugh Dickins } 148575edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 148675edd345SHugh Dickins #ifndef CONFIG_NUMA 148775edd345SHugh Dickins #define vm_policy vm_private_data 148875edd345SHugh Dickins #endif 1489680d794bSakpm@linux-foundation.org 1490800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1491800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1492800d8c63SKirill A. Shutemov { 1493800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 14942c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1495800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1496800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1497800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1498800d8c63SKirill A. Shutemov } 1499800d8c63SKirill A. Shutemov 1500800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1501800d8c63SKirill A. Shutemov { 1502800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1503800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1504800d8c63SKirill A. Shutemov } 1505800d8c63SKirill A. Shutemov 150641ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 150741ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 15081da177e4SLinus Torvalds { 15091da177e4SLinus Torvalds struct vm_area_struct pvma; 151018a2f371SMel Gorman struct page *page; 15118c63ca5bSWill Deacon struct vm_fault vmf = { 15128c63ca5bSWill Deacon .vma = &pvma, 15138c63ca5bSWill Deacon }; 15141da177e4SLinus Torvalds 1515800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1516e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1517800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 151818a2f371SMel Gorman 1519800d8c63SKirill A. Shutemov return page; 1520800d8c63SKirill A. Shutemov } 152118a2f371SMel Gorman 152278cc8cdcSRik van Riel /* 152378cc8cdcSRik van Riel * Make sure huge_gfp is always more limited than limit_gfp. 152478cc8cdcSRik van Riel * Some of the flags set permissions, while others set limitations. 152578cc8cdcSRik van Riel */ 152678cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 152778cc8cdcSRik van Riel { 152878cc8cdcSRik van Riel gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 152978cc8cdcSRik van Riel gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1530187df5ddSRik van Riel gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1531187df5ddSRik van Riel gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1532187df5ddSRik van Riel 1533187df5ddSRik van Riel /* Allow allocations only from the originally specified zones. */ 1534187df5ddSRik van Riel result |= zoneflags; 153578cc8cdcSRik van Riel 153678cc8cdcSRik van Riel /* 153778cc8cdcSRik van Riel * Minimize the result gfp by taking the union with the deny flags, 153878cc8cdcSRik van Riel * and the intersection of the allow flags. 
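 *
 * Worked example: if huge_gfp includes __GFP_FS but limit_gfp does not
 * (a GFP_NOFS-like caller), the intersection below clears __GFP_FS from
 * the result; and if limit_gfp carries __GFP_NORETRY, the union below
 * propagates __GFP_NORETRY into the result.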
153978cc8cdcSRik van Riel */ 154078cc8cdcSRik van Riel result |= (limit_gfp & denyflags); 154178cc8cdcSRik van Riel result |= (huge_gfp & limit_gfp) & allowflags; 154278cc8cdcSRik van Riel 154378cc8cdcSRik van Riel return result; 154478cc8cdcSRik van Riel } 154578cc8cdcSRik van Riel 1546800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1547800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1548800d8c63SKirill A. Shutemov { 1549800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 15507b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 15517b8d046fSMatthew Wilcox pgoff_t hindex; 1552800d8c63SKirill A. Shutemov struct page *page; 1553800d8c63SKirill A. Shutemov 15544620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 15557b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 15567b8d046fSMatthew Wilcox XA_PRESENT)) 1557800d8c63SKirill A. Shutemov return NULL; 1558800d8c63SKirill A. Shutemov 1559800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1560164cc4feSRik van Riel page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), 1561164cc4feSRik van Riel true); 1562800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1563800d8c63SKirill A. Shutemov if (page) 1564800d8c63SKirill A. Shutemov prep_transhuge_page(page); 1565dcdf11eeSDavid Rientjes else 1566dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK); 156718a2f371SMel Gorman return page; 156818a2f371SMel Gorman } 156918a2f371SMel Gorman 157018a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 157118a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 157218a2f371SMel Gorman { 157318a2f371SMel Gorman struct vm_area_struct pvma; 157418a2f371SMel Gorman struct page *page; 157518a2f371SMel Gorman 1576800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1577800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1578800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 157918a2f371SMel Gorman 1580800d8c63SKirill A. Shutemov return page; 1581800d8c63SKirill A. Shutemov } 1582800d8c63SKirill A. Shutemov 1583800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 15840f079694SMike Rapoport struct inode *inode, 1585800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1586800d8c63SKirill A. Shutemov { 15870f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 1588800d8c63SKirill A. Shutemov struct page *page; 1589800d8c63SKirill A. Shutemov int nr; 1590800d8c63SKirill A. Shutemov int err = -ENOSPC; 1591800d8c63SKirill A. Shutemov 1592396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1593800d8c63SKirill A. Shutemov huge = false; 1594800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1595800d8c63SKirill A. Shutemov 15960f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1597800d8c63SKirill A. Shutemov goto failed; 1598800d8c63SKirill A. Shutemov 1599800d8c63SKirill A. Shutemov if (huge) 1600800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1601800d8c63SKirill A. Shutemov else 1602800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 160375edd345SHugh Dickins if (page) { 160475edd345SHugh Dickins __SetPageLocked(page); 160575edd345SHugh Dickins __SetPageSwapBacked(page); 1606800d8c63SKirill A. 
Shutemov return page; 160775edd345SHugh Dickins } 160818a2f371SMel Gorman 1609800d8c63SKirill A. Shutemov err = -ENOMEM; 16100f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1611800d8c63SKirill A. Shutemov failed: 1612800d8c63SKirill A. Shutemov return ERR_PTR(err); 16131da177e4SLinus Torvalds } 161471fe804bSLee Schermerhorn 16151da177e4SLinus Torvalds /* 1616bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1617bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1618bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1619bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1620bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1621bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1622bde05d1cSHugh Dickins * 1623bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1624bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1625bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 1626bde05d1cSHugh Dickins */ 1627bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1628bde05d1cSHugh Dickins { 1629bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1630bde05d1cSHugh Dickins } 1631bde05d1cSHugh Dickins 1632bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1633bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1634bde05d1cSHugh Dickins { 1635bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1636bde05d1cSHugh Dickins struct address_space *swap_mapping; 1637c1cb20d4SYu Zhao swp_entry_t entry; 1638bde05d1cSHugh Dickins pgoff_t swap_index; 1639bde05d1cSHugh Dickins int error; 1640bde05d1cSHugh Dickins 1641bde05d1cSHugh Dickins oldpage = *pagep; 1642c1cb20d4SYu Zhao entry.val = page_private(oldpage); 1643c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1644bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1645bde05d1cSHugh Dickins 1646bde05d1cSHugh Dickins /* 1647bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1648bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1649bde05d1cSHugh Dickins */ 1650bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1651bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1652bde05d1cSHugh Dickins if (!newpage) 1653bde05d1cSHugh Dickins return -ENOMEM; 1654bde05d1cSHugh Dickins 165509cbfeafSKirill A. Shutemov get_page(newpage); 1656bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 16570142ef6cSHugh Dickins flush_dcache_page(newpage); 1658bde05d1cSHugh Dickins 16599956edf3SHugh Dickins __SetPageLocked(newpage); 16609956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1661bde05d1cSHugh Dickins SetPageUptodate(newpage); 1662c1cb20d4SYu Zhao set_page_private(newpage, entry.val); 1663bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1664bde05d1cSHugh Dickins 1665bde05d1cSHugh Dickins /* 1666bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1667bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 
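 *
 * Under the swap mapping's xa_lock, shmem_replace_entry() substitutes
 * newpage for oldpage in the swapcache slot; on success, the memcg charge
 * and the NR_FILE_PAGES accounting are moved over to newpage as well.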
1668bde05d1cSHugh Dickins */ 1669b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 167062f945b6SMatthew Wilcox error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 16710142ef6cSHugh Dickins if (!error) { 16720d1c2072SJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 16730d1c2072SJohannes Weiner __inc_lruvec_page_state(newpage, NR_FILE_PAGES); 16740d1c2072SJohannes Weiner __dec_lruvec_page_state(oldpage, NR_FILE_PAGES); 16750142ef6cSHugh Dickins } 1676b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1677bde05d1cSHugh Dickins 16780142ef6cSHugh Dickins if (unlikely(error)) { 16790142ef6cSHugh Dickins /* 16800142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 16810142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 16820142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 16830142ef6cSHugh Dickins */ 16840142ef6cSHugh Dickins oldpage = newpage; 16850142ef6cSHugh Dickins } else { 16866058eaecSJohannes Weiner lru_cache_add(newpage); 16870142ef6cSHugh Dickins *pagep = newpage; 16880142ef6cSHugh Dickins } 1689bde05d1cSHugh Dickins 1690bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1691bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1692bde05d1cSHugh Dickins 1693bde05d1cSHugh Dickins unlock_page(oldpage); 169409cbfeafSKirill A. Shutemov put_page(oldpage); 169509cbfeafSKirill A. Shutemov put_page(oldpage); 16960142ef6cSHugh Dickins return error; 1697bde05d1cSHugh Dickins } 1698bde05d1cSHugh Dickins 1699bde05d1cSHugh Dickins /* 1700c5bf121eSVineeth Remanan Pillai * Swap in the page pointed to by *pagep. 1701c5bf121eSVineeth Remanan Pillai * Caller has to make sure that *pagep contains a valid swapped page. 1702c5bf121eSVineeth Remanan Pillai * Returns 0 and the page in pagep if success. On failure, returns the 1703af44c12fSRandy Dunlap * error code and NULL in *pagep. 17041da177e4SLinus Torvalds */ 1705c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index, 1706c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, 1707c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 17082b740303SSouptick Joarder vm_fault_t *fault_type) 17091da177e4SLinus Torvalds { 17101da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 171123f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 171204f94e3fSDan Schatzberg struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; 1713b1e1ef34SYang Shi struct page *page; 17141da177e4SLinus Torvalds swp_entry_t swap; 17151da177e4SLinus Torvalds int error; 17161da177e4SLinus Torvalds 1717c5bf121eSVineeth Remanan Pillai VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); 1718c5bf121eSVineeth Remanan Pillai swap = radix_to_swp_entry(*pagep); 1719c5bf121eSVineeth Remanan Pillai *pagep = NULL; 172054af6042SHugh Dickins 17211da177e4SLinus Torvalds /* Look it up and read it in.. */ 1722ec560175SHuang Ying page = lookup_swap_cache(swap, NULL, 0); 172327ab7006SHugh Dickins if (!page) { 17249e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? 
*/ 17259e18eb29SAndres Lagar-Cavilla if (fault_type) { 172668da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 17279e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 17282262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 17299e18eb29SAndres Lagar-Cavilla } 17309e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 173141ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 173227ab7006SHugh Dickins if (!page) { 17331da177e4SLinus Torvalds error = -ENOMEM; 173454af6042SHugh Dickins goto failed; 1735285b2c4fSHugh Dickins } 17361da177e4SLinus Torvalds } 17371da177e4SLinus Torvalds 17381da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 173954af6042SHugh Dickins lock_page(page); 17400142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1741d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1742c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1743d1899228SHugh Dickins goto unlock; 1744bde05d1cSHugh Dickins } 174527ab7006SHugh Dickins if (!PageUptodate(page)) { 17461da177e4SLinus Torvalds error = -EIO; 174754af6042SHugh Dickins goto failed; 174854af6042SHugh Dickins } 174954af6042SHugh Dickins wait_on_page_writeback(page); 175054af6042SHugh Dickins 17518a84802eSSteven Price /* 17528a84802eSSteven Price * Some architectures may have to restore extra metadata to the 17538a84802eSSteven Price * physical page after reading from swap. 17548a84802eSSteven Price */ 17558a84802eSSteven Price arch_swap_restore(swap, page); 17568a84802eSSteven Price 1757bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1758bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1759bde05d1cSHugh Dickins if (error) 176054af6042SHugh Dickins goto failed; 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds 17633fea5a49SJohannes Weiner error = shmem_add_to_page_cache(page, mapping, index, 17643fea5a49SJohannes Weiner swp_to_radix_entry(swap), gfp, 17653fea5a49SJohannes Weiner charge_mm); 176654af6042SHugh Dickins if (error) 176754af6042SHugh Dickins goto failed; 176854af6042SHugh Dickins 17694595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 177054af6042SHugh Dickins info->swapped--; 177154af6042SHugh Dickins shmem_recalc_inode(inode); 17724595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 177327ab7006SHugh Dickins 177466d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 177566d2f4d2SHugh Dickins mark_page_accessed(page); 177666d2f4d2SHugh Dickins 177727ab7006SHugh Dickins delete_from_swap_cache(page); 177827ab7006SHugh Dickins set_page_dirty(page); 177927ab7006SHugh Dickins swap_free(swap); 178027ab7006SHugh Dickins 1781c5bf121eSVineeth Remanan Pillai *pagep = page; 1782c5bf121eSVineeth Remanan Pillai return 0; 1783c5bf121eSVineeth Remanan Pillai failed: 1784c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap)) 1785c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1786c5bf121eSVineeth Remanan Pillai unlock: 1787c5bf121eSVineeth Remanan Pillai if (page) { 1788c5bf121eSVineeth Remanan Pillai unlock_page(page); 1789c5bf121eSVineeth Remanan Pillai put_page(page); 1790c5bf121eSVineeth Remanan Pillai } 1791c5bf121eSVineeth Remanan Pillai 1792c5bf121eSVineeth Remanan Pillai return error; 1793c5bf121eSVineeth Remanan Pillai } 1794c5bf121eSVineeth Remanan Pillai 1795c5bf121eSVineeth Remanan Pillai /* 1796c5bf121eSVineeth Remanan Pillai * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1797c5bf121eSVineeth Remanan Pillai * 1798c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the 1799c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap 1800c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache. 1801c5bf121eSVineeth Remanan Pillai * 1802c949b097SAxel Rasmussen * vma, vmf, and fault_type are only supplied by shmem_fault: 1803c5bf121eSVineeth Remanan Pillai * otherwise they are NULL. 1804c5bf121eSVineeth Remanan Pillai */ 1805c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1806c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1807c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1808c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1809c5bf121eSVineeth Remanan Pillai { 1810c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1811c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1812c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1813c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1814c5bf121eSVineeth Remanan Pillai struct page *page; 1815c5bf121eSVineeth Remanan Pillai enum sgp_type sgp_huge = sgp; 1816c5bf121eSVineeth Remanan Pillai pgoff_t hindex = index; 1817164cc4feSRik van Riel gfp_t huge_gfp; 1818c5bf121eSVineeth Remanan Pillai int error; 1819c5bf121eSVineeth Remanan Pillai int once = 0; 1820c5bf121eSVineeth Remanan Pillai int alloced = 0; 1821c5bf121eSVineeth Remanan Pillai 1822c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1823c5bf121eSVineeth Remanan Pillai return -EFBIG; 1824c5bf121eSVineeth Remanan Pillai if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1825c5bf121eSVineeth Remanan Pillai sgp = SGP_CACHE; 1826c5bf121eSVineeth Remanan Pillai repeat: 1827c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1828c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1829c5bf121eSVineeth Remanan Pillai return -EINVAL; 1830c5bf121eSVineeth Remanan Pillai } 1831c5bf121eSVineeth Remanan Pillai 1832c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 183304f94e3fSDan Schatzberg charge_mm = vma ? 
vma->vm_mm : NULL; 1834c5bf121eSVineeth Remanan Pillai 183544835d20SMatthew Wilcox (Oracle) page = pagecache_get_page(mapping, index, 183644835d20SMatthew Wilcox (Oracle) FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0); 1837c949b097SAxel Rasmussen 1838c949b097SAxel Rasmussen if (page && vma && userfaultfd_minor(vma)) { 1839c949b097SAxel Rasmussen if (!xa_is_value(page)) { 1840c949b097SAxel Rasmussen unlock_page(page); 1841c949b097SAxel Rasmussen put_page(page); 1842c949b097SAxel Rasmussen } 1843c949b097SAxel Rasmussen *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 1844c949b097SAxel Rasmussen return 0; 1845c949b097SAxel Rasmussen } 1846c949b097SAxel Rasmussen 1847c5bf121eSVineeth Remanan Pillai if (xa_is_value(page)) { 1848c5bf121eSVineeth Remanan Pillai error = shmem_swapin_page(inode, index, &page, 1849c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1850c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1851c5bf121eSVineeth Remanan Pillai goto repeat; 1852c5bf121eSVineeth Remanan Pillai 1853c5bf121eSVineeth Remanan Pillai *pagep = page; 1854c5bf121eSVineeth Remanan Pillai return error; 1855c5bf121eSVineeth Remanan Pillai } 1856c5bf121eSVineeth Remanan Pillai 185763ec1973SMatthew Wilcox (Oracle) if (page) 185863ec1973SMatthew Wilcox (Oracle) hindex = page->index; 1859c5bf121eSVineeth Remanan Pillai if (page && sgp == SGP_WRITE) 1860c5bf121eSVineeth Remanan Pillai mark_page_accessed(page); 1861c5bf121eSVineeth Remanan Pillai 1862c5bf121eSVineeth Remanan Pillai /* fallocated page? */ 1863c5bf121eSVineeth Remanan Pillai if (page && !PageUptodate(page)) { 1864c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 1865c5bf121eSVineeth Remanan Pillai goto clear; 1866c5bf121eSVineeth Remanan Pillai unlock_page(page); 1867c5bf121eSVineeth Remanan Pillai put_page(page); 1868c5bf121eSVineeth Remanan Pillai page = NULL; 186963ec1973SMatthew Wilcox (Oracle) hindex = index; 1870c5bf121eSVineeth Remanan Pillai } 187163ec1973SMatthew Wilcox (Oracle) if (page || sgp == SGP_READ) 187263ec1973SMatthew Wilcox (Oracle) goto out; 1873c5bf121eSVineeth Remanan Pillai 1874c5bf121eSVineeth Remanan Pillai /* 1875c5bf121eSVineeth Remanan Pillai * Fast cache lookup did not find it: 1876c5bf121eSVineeth Remanan Pillai * bring it back from swap or allocate. 1877c5bf121eSVineeth Remanan Pillai */ 1878c5bf121eSVineeth Remanan Pillai 1879cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1880cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1881cfda0526SMike Rapoport return 0; 1882cfda0526SMike Rapoport } 1883cfda0526SMike Rapoport 1884800d8c63SKirill A. Shutemov /* shmem_symlink() */ 188530e6a51dSHui Su if (!shmem_mapping(mapping)) 1886800d8c63SKirill A. Shutemov goto alloc_nohuge; 1887657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1888800d8c63SKirill A. Shutemov goto alloc_nohuge; 1889800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1890800d8c63SKirill A. Shutemov goto alloc_huge; 1891800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1892800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1893800d8c63SKirill A. Shutemov goto alloc_nohuge; 189427d80fa2SKees Cook case SHMEM_HUGE_WITHIN_SIZE: { 189527d80fa2SKees Cook loff_t i_size; 189627d80fa2SKees Cook pgoff_t off; 189727d80fa2SKees Cook 1898800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1899800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1900800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1901800d8c63SKirill A. 
Shutemov i_size >> PAGE_SHIFT >= off) 1902800d8c63SKirill A. Shutemov goto alloc_huge; 190327d80fa2SKees Cook 190427d80fa2SKees Cook fallthrough; 190527d80fa2SKees Cook } 1906800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1907657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1908657e3038SKirill A. Shutemov goto alloc_huge; 1909657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1910800d8c63SKirill A. Shutemov goto alloc_nohuge; 191159a16eadSHugh Dickins } 19121da177e4SLinus Torvalds 1913800d8c63SKirill A. Shutemov alloc_huge: 1914164cc4feSRik van Riel huge_gfp = vma_thp_gfp_mask(vma); 191578cc8cdcSRik van Riel huge_gfp = limit_gfp_mask(huge_gfp, gfp); 1916164cc4feSRik van Riel page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true); 1917800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1918c5bf121eSVineeth Remanan Pillai alloc_nohuge: 1919c5bf121eSVineeth Remanan Pillai page = shmem_alloc_and_acct_page(gfp, inode, 1920800d8c63SKirill A. Shutemov index, false); 192154af6042SHugh Dickins } 1922800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1923779750d2SKirill A. Shutemov int retry = 5; 1924c5bf121eSVineeth Remanan Pillai 1925800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1926800d8c63SKirill A. Shutemov page = NULL; 1927779750d2SKirill A. Shutemov if (error != -ENOSPC) 1928c5bf121eSVineeth Remanan Pillai goto unlock; 1929779750d2SKirill A. Shutemov /* 1930c5bf121eSVineeth Remanan Pillai * Try to reclaim some space by splitting a huge page 1931779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1932779750d2SKirill A. Shutemov */ 1933779750d2SKirill A. Shutemov while (retry--) { 1934779750d2SKirill A. Shutemov int ret; 1935c5bf121eSVineeth Remanan Pillai 1936779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1937779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1938779750d2SKirill A. Shutemov break; 1939779750d2SKirill A. Shutemov if (ret) 1940779750d2SKirill A. Shutemov goto alloc_nohuge; 1941779750d2SKirill A. Shutemov } 1942c5bf121eSVineeth Remanan Pillai goto unlock; 1943800d8c63SKirill A. Shutemov } 1944800d8c63SKirill A. Shutemov 1945800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1946800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1947800d8c63SKirill A. Shutemov else 1948800d8c63SKirill A. Shutemov hindex = index; 1949800d8c63SKirill A. Shutemov 195066d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1951eb39d618SHugh Dickins __SetPageReferenced(page); 195266d2f4d2SHugh Dickins 1953800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 19543fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK, 19553fea5a49SJohannes Weiner charge_mm); 19563fea5a49SJohannes Weiner if (error) 1957800d8c63SKirill A. Shutemov goto unacct; 19586058eaecSJohannes Weiner lru_cache_add(page); 195954af6042SHugh Dickins 19604595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1961d8c6546bSMatthew Wilcox (Oracle) info->alloced += compound_nr(page); 1962800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 196354af6042SHugh Dickins shmem_recalc_inode(inode); 19644595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 19651635f6a7SHugh Dickins alloced = true; 196654af6042SHugh Dickins 1967779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1968779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1969779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1970779750d2SKirill A. Shutemov /* 1971779750d2SKirill A. 
Shutemov * Part of the huge page is beyond i_size: subject 1972779750d2SKirill A. Shutemov * to shrink under memory pressure. 1973779750d2SKirill A. Shutemov */ 1974779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1975d041353dSCong Wang /* 1976d041353dSCong Wang * _careful to defend against unlocked access to 1977d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1978d041353dSCong Wang */ 1979d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1980779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1981779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1982779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1983779750d2SKirill A. Shutemov } 1984779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1985779750d2SKirill A. Shutemov } 1986779750d2SKirill A. Shutemov 1987ec9516fbSHugh Dickins /* 19881635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 19891635f6a7SHugh Dickins */ 19901635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 19911635f6a7SHugh Dickins sgp = SGP_WRITE; 19921635f6a7SHugh Dickins clear: 19931635f6a7SHugh Dickins /* 19941635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 19951635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 19961635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1997ec9516fbSHugh Dickins */ 1998800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1999800d8c63SKirill A. Shutemov int i; 2000800d8c63SKirill A. Shutemov 200163ec1973SMatthew Wilcox (Oracle) for (i = 0; i < compound_nr(page); i++) { 200263ec1973SMatthew Wilcox (Oracle) clear_highpage(page + i); 200363ec1973SMatthew Wilcox (Oracle) flush_dcache_page(page + i); 2004800d8c63SKirill A. Shutemov } 200563ec1973SMatthew Wilcox (Oracle) SetPageUptodate(page); 2006ec9516fbSHugh Dickins } 2007bde05d1cSHugh Dickins 200854af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 200975edd345SHugh Dickins if (sgp <= SGP_CACHE && 201009cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2011267a4c76SHugh Dickins if (alloced) { 2012267a4c76SHugh Dickins ClearPageDirty(page); 2013267a4c76SHugh Dickins delete_from_page_cache(page); 20144595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 2015267a4c76SHugh Dickins shmem_recalc_inode(inode); 20164595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 2017267a4c76SHugh Dickins } 201854af6042SHugh Dickins error = -EINVAL; 2019267a4c76SHugh Dickins goto unlock; 2020ff36b801SShaohua Li } 202163ec1973SMatthew Wilcox (Oracle) out: 2022800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 202354af6042SHugh Dickins return 0; 2024d00806b1SNick Piggin 2025d0217ac0SNick Piggin /* 202654af6042SHugh Dickins * Error recovery. 20271da177e4SLinus Torvalds */ 202854af6042SHugh Dickins unacct: 2029d8c6546bSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, compound_nr(page)); 2030800d8c63SKirill A. Shutemov 2031800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 2032800d8c63SKirill A. Shutemov unlock_page(page); 2033800d8c63SKirill A. Shutemov put_page(page); 2034800d8c63SKirill A. Shutemov goto alloc_nohuge; 2035800d8c63SKirill A. Shutemov } 2036d1899228SHugh Dickins unlock: 203727ab7006SHugh Dickins if (page) { 203854af6042SHugh Dickins unlock_page(page); 203909cbfeafSKirill A. 
Shutemov put_page(page); 204054af6042SHugh Dickins } 204154af6042SHugh Dickins if (error == -ENOSPC && !once++) { 20424595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 204354af6042SHugh Dickins shmem_recalc_inode(inode); 20444595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 20451da177e4SLinus Torvalds goto repeat; 2046d8dc74f2SAdrian Bunk } 20477f4446eeSMatthew Wilcox if (error == -EEXIST) 204854af6042SHugh Dickins goto repeat; 204954af6042SHugh Dickins return error; 20501da177e4SLinus Torvalds } 20511da177e4SLinus Torvalds 205210d20bd2SLinus Torvalds /* 205310d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 205410d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 205510d20bd2SLinus Torvalds * target. 205610d20bd2SLinus Torvalds */ 2057ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 205810d20bd2SLinus Torvalds { 205910d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 20602055da97SIngo Molnar list_del_init(&wait->entry); 206110d20bd2SLinus Torvalds return ret; 206210d20bd2SLinus Torvalds } 206310d20bd2SLinus Torvalds 206420acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 20651da177e4SLinus Torvalds { 206611bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 2067496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 20689e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2069657e3038SKirill A. Shutemov enum sgp_type sgp; 207020acce67SSouptick Joarder int err; 207120acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 20721da177e4SLinus Torvalds 2073f00cdc6dSHugh Dickins /* 2074f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2075f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 2076f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 20778e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 20788e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 20798e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 20808e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 20818e205f77SHugh Dickins * 20828e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 20838e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 20848e205f77SHugh Dickins * we just need to make racing faults a rare case. 20858e205f77SHugh Dickins * 20868e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 20878e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 20888e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 2089f00cdc6dSHugh Dickins */ 2090f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2091f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2092f00cdc6dSHugh Dickins 2093f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2094f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 20958e205f77SHugh Dickins if (shmem_falloc && 20968e205f77SHugh Dickins shmem_falloc->waitq && 20978e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 20988e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 20998897c1b1SKirill A. 
Shutemov struct file *fpin; 21008e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 210110d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 21028e205f77SHugh Dickins 21038e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 21048897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL); 21058897c1b1SKirill A. Shutemov if (fpin) 21068e205f77SHugh Dickins ret = VM_FAULT_RETRY; 21078e205f77SHugh Dickins 21088e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 21098e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 21108e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 21118e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21128e205f77SHugh Dickins schedule(); 21138e205f77SHugh Dickins 21148e205f77SHugh Dickins /* 21158e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 21168e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 21178e205f77SHugh Dickins * is usually invalid by the time we reach here, but 21188e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 21198e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 21208e205f77SHugh Dickins */ 21218e205f77SHugh Dickins spin_lock(&inode->i_lock); 21228e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 21238e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21248897c1b1SKirill A. Shutemov 21258897c1b1SKirill A. Shutemov if (fpin) 21268897c1b1SKirill A. Shutemov fput(fpin); 21278e205f77SHugh Dickins return ret; 2128f00cdc6dSHugh Dickins } 21298e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2130f00cdc6dSHugh Dickins } 2131f00cdc6dSHugh Dickins 2132657e3038SKirill A. Shutemov sgp = SGP_CACHE; 213318600332SMichal Hocko 213418600332SMichal Hocko if ((vma->vm_flags & VM_NOHUGEPAGE) || 213518600332SMichal Hocko test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 2136657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 213718600332SMichal Hocko else if (vma->vm_flags & VM_HUGEPAGE) 213818600332SMichal Hocko sgp = SGP_HUGE; 2139657e3038SKirill A. 

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
				gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
	return ret;
}
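/*
 * Editor's note (not upstream text): the hole-punch/fault interlock above
 * can be exercised from userspace. A minimal sketch, assuming a Linux
 * host with memfd_create(2) and fallocate(2); error handling trimmed:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("demo", 0);
 *	ftruncate(fd, 1 << 20);
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;				// fault pages in
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);			// punch the whole range
 *	// a racing thread touching p[] meanwhile takes shmem_fault()'s
 *	// waitq path above instead of repopulating the hole.
 */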
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long addr;
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return addr;
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return addr;
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint and we allocated area there
	 * successfully, respect that as before.
	 */
	if (uaddr == addr)
		return addr;

	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

		if (file) {
			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
		} else {
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
				return addr;
			sb = shm_mnt->mnt_sb;
		}
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
			return addr;
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
		return addr;
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
		return addr;

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
		return addr;
	return inflated_addr;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
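/*
 * Editor's note (not upstream text): a worked example of the "inflation"
 * arithmetic in shmem_get_unmapped_area() above, assuming
 * HPAGE_PMD_SIZE = 2M and PAGE_SIZE = 4K:
 *
 *	pgoff = 256 (file offset 1M)	=> offset = 1M
 *	len   = 4M			=> inflated_len = 6M - 4K
 *	suppose the second get_area() returns an inflated_addr with
 *	inflated_offset = inflated_addr & (2M - 1) = 1.5M
 *	inflated_addr += 1M - 1.5M;	// slide down to offset congruence
 *	1.5M > 1M, so inflated_addr += 2M;	// back up inside the area
 *
 * The over-sized request leaves 2M - 4K of slack, always enough to move
 * the start to an address congruent with offset mod 2M, so that 2M-aligned
 * stretches of the file land on PMD boundaries and can be mapped hugely.
 */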
int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	/*
	 * What serializes the accesses to info->flags?
	 * ipc_lock_object() when called from shmctl_do_lock(),
	 * no serialization needed when called from shm_destroy().
	 */
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, ucounts))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
		user_shm_unlock(inode->i_size, ucounts);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
	int ret;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/* arm64 - allow memory tagging on RAM-based files */
	vma->vm_flags |= VM_MTE_ALLOWED;

	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}
	return 0;
}
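/*
 * Editor's note (not upstream text): seal_check_future_write() above is
 * what makes F_SEAL_FUTURE_WRITE bite at mmap() time. A minimal sketch,
 * assuming memfd_create(2) and fcntl(2) F_ADD_SEALS:
 *
 *	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
 *	// pre-existing writable mappings keep working, but from now on
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// fails with EPERM, while a PROT_READ mapping still succeeds.
 */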
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (shmem_reserve_inode(sb, &ino))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = ino;
		inode_init_owner(&init_user_ns, inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_generation = prandom_u32();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		atomic_set(&info->stop_eviction, 0);
		info->seals = F_SEAL_SEAL;
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->shrinklist);
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}

		lockdep_annotate_inode_mutex_key(inode);
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_USERFAULTFD
int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
			   pmd_t *dst_pmd,
			   struct vm_area_struct *dst_vma,
			   unsigned long dst_addr,
			   unsigned long src_addr,
			   bool zeropage,
			   struct page **pagep)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	void *page_kaddr;
	struct page *page;
	int ret;
	pgoff_t max_off;

	if (!shmem_inode_acct_block(inode, 1)) {
		/*
		 * We may have got a page, returned -ENOENT triggering a retry,
		 * and now we find ourselves with -ENOMEM. Release the page, to
		 * avoid a BUG_ON in our caller.
		 */
		if (unlikely(*pagep)) {
			put_page(*pagep);
			*pagep = NULL;
		}
		return -ENOMEM;
	}

	if (!*pagep) {
		ret = -ENOMEM;
		page = shmem_alloc_page(gfp, info, pgoff);
		if (!page)
			goto out_unacct_blocks;

		if (!zeropage) {	/* COPY */
			page_kaddr = kmap_atomic(page);
			ret = copy_from_user(page_kaddr,
					     (const void __user *)src_addr,
					     PAGE_SIZE);
			kunmap_atomic(page_kaddr);

			/* fallback to copy_from_user outside mmap_lock */
			if (unlikely(ret)) {
				*pagep = page;
				ret = -ENOENT;
				/* don't free the page */
				goto out_unacct_blocks;
			}
		} else {		/* ZEROPAGE */
			clear_highpage(page);
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	VM_BUG_ON(PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));
	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	__SetPageUptodate(page);

	ret = -EFAULT;
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(pgoff >= max_off))
		goto out_release;

	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
				      gfp & GFP_RECLAIM_MASK, dst_mm);
	if (ret)
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, false);
	if (ret)
		goto out_delete_from_cache;

	spin_lock_irq(&info->lock);
	info->alloced++;
	inode->i_blocks += BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	SetPageDirty(page);
	unlock_page(page);
	return 0;
out_delete_from_cache:
	delete_from_page_cache(page);
out_release:
	unlock_page(page);
	put_page(page);
out_unacct_blocks:
	shmem_inode_unacct_blocks(inode, 1);
	return ret;
}
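/*
 * Editor's note (not upstream text): the -ENOENT above is a retry
 * handshake with the UFFDIO_COPY caller in mm/userfaultfd.c. On the
 * first attempt copy_from_user() runs with mmap_lock held and may copy
 * short if the source is not resident; the freshly allocated page is
 * then parked in *pagep and -ENOENT returned, the caller drops
 * mmap_lock, completes the copy, and calls back in with *pagep set so
 * the allocation and block accounting are not repeated.
 */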
#endif /* CONFIG_USERFAULTFD */

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned flags,
		  struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t index = pos >> PAGE_SHIFT;

	/* i_mutex is held by caller */
	if (unlikely(info->seals & (F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
	}

	return shmem_getpage(inode, index, pagep, SGP_WRITE);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		struct page *head = compound_head(page);
		if (PageTransCompound(page)) {
			int i;

			for (i = 0; i < HPAGE_PMD_NR; i++) {
				if (head + i == page)
					continue;
				clear_highpage(head + i);
				flush_dcache_page(head + i);
			}
		}
		if (copied < PAGE_SIZE) {
			unsigned from = pos & (PAGE_SIZE - 1);
			zero_user_segments(page, 0, from,
					   from + copied, PAGE_SIZE);
		}
		SetPageUptodate(head);
	}
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
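/*
 * Editor's note (not upstream text): tmpfs has no write_iter of its own
 * at this point; a plain write(2) is driven by the generic page-cache
 * write path, which calls the write_begin/write_end pair above once per
 * page-sized chunk, roughly:
 *
 *	write(fd, buf, n)
 *	  -> generic_file_write_iter()
 *	    -> generic_perform_write()
 *	      -> shmem_write_begin()	// find/allocate + lock the page
 *	      -> (copy user data into the page)
 *	      -> shmem_write_end()	// zero tails, dirty, unlock, i_size
 */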
static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (!iter_is_iovec(to))
		sgp = SGP_CACHE;

	index = *ppos >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page) {
			if (sgp == SGP_CACHE)
				set_page_dirty(page);
			unlock_page(page);
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset) {
				if (page)
					put_page(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			get_page(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		put_page(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}
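/*
 * Editor's note (not upstream text): the llseek below wires up
 * SEEK_DATA/SEEK_HOLE via mapping_seek_hole_data(). A sketch of the
 * observable behaviour, assuming /dev/shm is a tmpfs mount and error
 * handling is omitted:
 *
 *	int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 1 << 20);			// 1M hole
 *	pwrite(fd, "x", 1, 512 * 1024);		// one page of data
 *	off_t d = lseek(fd, 0, SEEK_DATA);	// ~512K, page-aligned
 *	off_t h = lseek(fd, d, SEEK_HOLE);	// end of that page
 */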
static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	if (offset < 0)
		return -ENXIO;

	inode_lock(inode);
	/* We're holding i_mutex so we can access i_size directly */
	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	inode_unlock(inode);
	return offset;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end, undo_fallocend;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			error = -EPERM;
			goto out;
		}

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
		spin_unlock(&inode->i_lock);
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	start = offset >> PAGE_SHIFT;
	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	/*
	 * info->fallocend is only relevant when huge pages might be
	 * involved: to prevent split_huge_page() freeing fallocated
	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
	 */
	undo_fallocend = info->fallocend;
	if (info->fallocend < end)
		info->fallocend = end;

	for (index = start; index < end; ) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
		if (error) {
			info->fallocend = undo_fallocend;
			/* Remove the !PageUptodate pages we added */
			if (index > start) {
				shmem_undo_range(inode,
				    (loff_t)start << PAGE_SHIFT,
				    ((loff_t)index << PAGE_SHIFT) - 1, true);
			}
			goto undone;
		}

		index++;
		/*
		 * Here is a more important optimization than it appears:
		 * a second SGP_FALLOC on the same huge page will clear it,
		 * making it PageUptodate and un-undoable if we fail later.
		 */
		if (PageTransCompound(page)) {
			index = round_up(index, HPAGE_PMD_NR);
			/* Beware 32-bit wraparound */
			if (!index)
				index--;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced += index - shmem_falloc.next;
		shmem_falloc.next = index;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	inode_unlock(inode);
	return error;
}
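/*
 * Editor's note (not upstream text): both modes accepted above are
 * reachable with plain fallocate(2); a sketch, assuming fd refers to a
 * tmpfs file:
 *
 *	fallocate(fd, 0, 0, 8 << 20);		// preallocate 8M, grows i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE,
 *		  0, 8 << 20);			// reserve, i_size unchanged
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);			// free the first 1M again
 *
 * Anything else (e.g. FALLOC_FL_COLLAPSE_RANGE) is rejected with
 * -EOPNOTSUPP by the mode check at the top.
 */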
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */

	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);

	return 0;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int
shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
	    struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;

		error = 0;
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int
shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
	      struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     NULL,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		d_tmpfile(dentry, inode);
	}
	return error;
out_iput:
	iput(inode);
	return error;
}
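/*
 * Editor's note (not upstream text): shmem_tmpfile() above backs
 * O_TMPFILE. A sketch of the usual create-then-materialize pattern,
 * assuming /dev/shm is a tmpfs mount ("N" stands for the fd number,
 * normally built with snprintf):
 *
 *	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *	write(fd, data, len);			// unlinked, invisible file
 *	linkat(AT_FDCWD, "/proc/self/fd/N", AT_FDCWD,
 *	       "/dev/shm/now-visible", AT_SYMLINK_FOLLOW);
 *
 * That linkat() step is what the "O_TMPFILE ... first link must skip
 * that" comment in shmem_link() below is about: the link count goes
 * from 0 to 1 without charging a second entry against the inode quota.
 */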
static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
				 mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file.
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	int ret = 0;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 * But if an O_TMPFILE file is linked into the tmpfs, the
	 * first link must skip that, to get the accounting right.
	 */
	if (inode->i_nlink) {
		ret = shmem_reserve_inode(inode->i_sb, NULL);
		if (ret)
			goto out;
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	bool old_is_dir = d_is_dir(old_dentry);
	bool new_is_dir = d_is_dir(new_dentry);

	if (old_dir != new_dir && old_is_dir != new_is_dir) {
		if (old_is_dir) {
			drop_nlink(old_dir);
			inc_nlink(new_dir);
		} else {
			drop_nlink(new_dir);
			inc_nlink(old_dir);
		}
	}
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	d_inode(old_dentry)->i_ctime =
	d_inode(new_dentry)->i_ctime = current_time(old_dir);

	return 0;
}

static int shmem_whiteout(struct user_namespace *mnt_userns,
			  struct inode *old_dir, struct dentry *old_dentry)
{
	struct dentry *whiteout;
	int error;

	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
	if (!whiteout)
		return -ENOMEM;

	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
	dput(whiteout);
	if (error)
		return error;

	/*
	 * Cheat and hash the whiteout while the old dentry is still in
	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
	 *
	 * d_lookup() will consistently find one of them at this point,
	 * not sure which one, but that isn't even important.
	 */
	d_rehash(whiteout);
	return 0;
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename2(struct user_namespace *mnt_userns,
			 struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (flags & RENAME_WHITEOUT) {
		int error;

		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
		if (error)
			return error;
	}

	if (d_really_is_positive(new_dentry)) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = current_time(old_dir);
	return 0;
}
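/*
 * Editor's note (not upstream text): the flags handled above map
 * straight onto renameat2(2):
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_WHITEOUT);
 *
 * RENAME_WHITEOUT is what overlayfs relies on with a tmpfs upper layer:
 * the source name is left pointing at a 0:0 character device node
 * (WHITEOUT_DEV) created by shmem_whiteout() above.
 */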
static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;

	len = strlen(symname) + 1;
	if (len > PAGE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
				VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error && error != -EOPNOTSUPP) {
		iput(inode);
		return error;
	}

	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
		if (!inode->i_link) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		inode_nohighmem(inode);
		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		memcpy(page_address(page), symname, len);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void shmem_put_link(void *arg)
{
	mark_page_accessed(arg);
	put_page(arg);
}

static const char *shmem_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct page *page = NULL;
	int error;
	if (!dentry) {
		page = find_get_page(inode->i_mapping, 0);
		if (!page)
			return ERR_PTR(-ECHILD);
		if (!PageUptodate(page)) {
			put_page(page);
			return ERR_PTR(-ECHILD);
		}
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_READ);
		if (error)
			return ERR_PTR(error);
		unlock_page(page);
	}
	set_delayed_call(done, shmem_put_link, page);
	return page_address(page);
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

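/*
 * Editor's note (not upstream text): only the trusted.* and security.*
 * namespaces are wired up below (plus POSIX ACLs when configured); tmpfs
 * has no user.* handler at this point, so from userspace:
 *
 *	setxattr(path, "trusted.x", "v", 1, 0);	// needs CAP_SYS_ADMIN
 *	setxattr(path, "user.x", "v", 1, 0);	// fails with EOPNOTSUPP
 *	listxattr(path, buf, sizeof(buf));	// lists what was stored
 */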
/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kvfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static int shmem_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_xattr_handler_set(const struct xattr_handler *handler,
				   struct user_namespace *mnt_userns,
				   struct dentry *unused, struct inode *inode,
				   const char *name, const void *value,
				   size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
}

static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

/* Find any alias of inode, but prefer a hashed alias */
static struct dentry *shmem_find_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	return alias ?: d_find_any_alias(inode);
}


static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum;

        if (fh_len < 3)
                return NULL;

        inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = shmem_find_alias(inode);
                iput(inode);
        }

        return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
                                struct inode *parent)
{
        if (*len < 3) {
                *len = 3;
                return FILEID_INVALID;
        }

        if (inode_unhashed(inode)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
                 * to do it once
                 */
                static DEFINE_SPINLOCK(lock);
                spin_lock(&lock);
                if (inode_unhashed(inode))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
        }

        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
        fh[2] = ((__u64)inode->i_ino) >> 32;

        *len = 3;
        return 1;
}

static const struct export_operations shmem_export_ops = {
        .get_parent = shmem_get_parent,
        .encode_fh = shmem_encode_fh,
        .fh_to_dentry = shmem_fh_to_dentry,
};
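/*
 * For illustration, the 3-word handle built by shmem_encode_fh() can be
 * observed from userspace with name_to_handle_at(2); a minimal sketch
 * (the path is only an example, error handling omitted):
 *
 *      struct file_handle *fh;
 *      int mount_id;
 *
 *      fh = malloc(sizeof(*fh) + 3 * sizeof(unsigned int));
 *      fh->handle_bytes = 3 * sizeof(unsigned int);
 *      name_to_handle_at(AT_FDCWD, "/dev/shm/f", fh, &mount_id, 0);
 *
 * On success f_handle holds { i_generation, ino bits 31:0, ino bits
 * 63:32 }, matching fh[0..2] as written above.
 */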
enum shmem_param {
        Opt_gid,
        Opt_huge,
        Opt_mode,
        Opt_mpol,
        Opt_nr_blocks,
        Opt_nr_inodes,
        Opt_size,
        Opt_uid,
        Opt_inode32,
        Opt_inode64,
};

static const struct constant_table shmem_param_enums_huge[] = {
        {"never",       SHMEM_HUGE_NEVER },
        {"always",      SHMEM_HUGE_ALWAYS },
        {"within_size", SHMEM_HUGE_WITHIN_SIZE },
        {"advise",      SHMEM_HUGE_ADVISE },
        {}
};

const struct fs_parameter_spec shmem_fs_parameters[] = {
        fsparam_u32   ("gid",           Opt_gid),
        fsparam_enum  ("huge",          Opt_huge,  shmem_param_enums_huge),
        fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("mpol",          Opt_mpol),
        fsparam_string("nr_blocks",     Opt_nr_blocks),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("size",          Opt_size),
        fsparam_u32   ("uid",           Opt_uid),
        fsparam_flag  ("inode32",       Opt_inode32),
        fsparam_flag  ("inode64",       Opt_inode64),
        {}
};
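/*
 * For illustration, the table above accepts mount strings such as
 * "size=1G", "size=50%", "nr_blocks=1024", "nr_inodes=1m", "mode=1777",
 * "huge=within_size" or "mpol=interleave:0-3" (all of these values are
 * examples, not defaults).
 */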
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
        struct shmem_options *ctx = fc->fs_private;
        struct fs_parse_result result;
        unsigned long long size;
        char *rest;
        int opt;

        opt = fs_parse(fc, shmem_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_size:
                size = memparse(param->string, &rest);
                if (*rest == '%') {
                        size <<= PAGE_SHIFT;
                        size *= totalram_pages();
                        do_div(size, 100);
                        rest++;
                }
                if (*rest)
                        goto bad_value;
                ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
                ctx->seen |= SHMEM_SEEN_BLOCKS;
                break;
        case Opt_nr_blocks:
                ctx->blocks = memparse(param->string, &rest);
                if (*rest)
                        goto bad_value;
                ctx->seen |= SHMEM_SEEN_BLOCKS;
                break;
        case Opt_nr_inodes:
                ctx->inodes = memparse(param->string, &rest);
                if (*rest)
                        goto bad_value;
                ctx->seen |= SHMEM_SEEN_INODES;
                break;
        case Opt_mode:
                ctx->mode = result.uint_32 & 07777;
                break;
        case Opt_uid:
                ctx->uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(ctx->uid))
                        goto bad_value;
                break;
        case Opt_gid:
                ctx->gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(ctx->gid))
                        goto bad_value;
                break;
        case Opt_huge:
                ctx->huge = result.uint_32;
                if (ctx->huge != SHMEM_HUGE_NEVER &&
                    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
                      has_transparent_hugepage()))
                        goto unsupported_parameter;
                ctx->seen |= SHMEM_SEEN_HUGE;
                break;
        case Opt_mpol:
                if (IS_ENABLED(CONFIG_NUMA)) {
                        mpol_put(ctx->mpol);
                        ctx->mpol = NULL;
                        if (mpol_parse_str(param->string, &ctx->mpol))
                                goto bad_value;
                        break;
                }
                goto unsupported_parameter;
        case Opt_inode32:
                ctx->full_inums = false;
                ctx->seen |= SHMEM_SEEN_INUMS;
                break;
        case Opt_inode64:
                if (sizeof(ino_t) < 8) {
                        return invalfc(fc,
                                       "Cannot use inode64 with <64bit inums in kernel\n");
                }
                ctx->full_inums = true;
                ctx->seen |= SHMEM_SEEN_INUMS;
                break;
        }
        return 0;

unsupported_parameter:
        return invalfc(fc, "Unsupported parameter '%s'", param->key);
bad_value:
        return invalfc(fc, "Bad value for '%s'", param->key);
}
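/*
 * A worked example of the "size=<x>%" branch above: with 4KiB pages and
 * 2GiB of RAM (totalram_pages() == 524288), "size=50%" computes
 * 50 << 12 = 204800, then * 524288 / 100 = 1073741824 bytes, so
 * ctx->blocks becomes 262144 pages, i.e. half of physical memory.
 * (The RAM figure is only an example.)
 */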
static int shmem_parse_options(struct fs_context *fc, void *data)
{
        char *options = data;

        if (options) {
                int err = security_sb_eat_lsm_opts(options, &fc->security);
                if (err)
                        return err;
        }

        while (options != NULL) {
                char *this_char = options;
                for (;;) {
                        /*
                         * NUL-terminate this option: unfortunately,
                         * mount options form a comma-separated list,
                         * but mpol's nodelist may also contain commas.
                         */
                        options = strchr(options, ',');
                        if (options == NULL)
                                break;
                        options++;
                        if (!isdigit(*options)) {
                                options[-1] = '\0';
                                break;
                        }
                }
                if (*this_char) {
                        char *value = strchr(this_char, '=');
                        size_t len = 0;
                        int err;

                        if (value) {
                                *value++ = '\0';
                                len = strlen(value);
                        }
                        err = vfs_parse_fs_string(fc, this_char, value, len);
                        if (err < 0)
                                return err;
                }
        }
        return 0;
}

/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is already in use.
 */
static int shmem_reconfigure(struct fs_context *fc)
{
        struct shmem_options *ctx = fc->fs_private;
        struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
        unsigned long inodes;
        struct mempolicy *mpol = NULL;
        const char *err;

        raw_spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
        if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
                if (!sbinfo->max_blocks) {
                        err = "Cannot retroactively limit size";
                        goto out;
                }
                if (percpu_counter_compare(&sbinfo->used_blocks,
                                           ctx->blocks) > 0) {
                        err = "Too small a size for current use";
                        goto out;
                }
        }
        if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
                if (!sbinfo->max_inodes) {
                        err = "Cannot retroactively limit inodes";
                        goto out;
                }
                if (ctx->inodes < inodes) {
                        err = "Too few inodes for current use";
                        goto out;
                }
        }

        if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
            sbinfo->next_ino > UINT_MAX) {
                err = "Current inum too high to switch to 32-bit inums";
                goto out;
        }

        if (ctx->seen & SHMEM_SEEN_HUGE)
                sbinfo->huge = ctx->huge;
        if (ctx->seen & SHMEM_SEEN_INUMS)
                sbinfo->full_inums = ctx->full_inums;
        if (ctx->seen & SHMEM_SEEN_BLOCKS)
                sbinfo->max_blocks = ctx->blocks;
        if (ctx->seen & SHMEM_SEEN_INODES) {
                sbinfo->max_inodes = ctx->inodes;
                sbinfo->free_inodes = ctx->inodes - inodes;
        }

        /*
         * Preserve previous mempolicy unless mpol remount option was specified.
         */
        if (ctx->mpol) {
                mpol = sbinfo->mpol;
                sbinfo->mpol = ctx->mpol;       /* transfers initial ref */
                ctx->mpol = NULL;
        }
        raw_spin_unlock(&sbinfo->stat_lock);
        mpol_put(mpol);
        return 0;
out:
        raw_spin_unlock(&sbinfo->stat_lock);
        return invalfc(fc, "%s", err);
}
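/*
 * A worked example of the rules above (path and sizes are examples
 * only): "mount -o remount,size=2G /dev/shm" succeeds while current
 * usage fits in 2G; shrinking below current usage fails with "Too small
 * a size for current use", and imposing a limit on an instance mounted
 * unlimited fails with "Cannot retroactively limit size".
 */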
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (0777 | S_ISVTX))
                seq_printf(seq, ",mode=%03ho", sbinfo->mode);
        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(seq, ",uid=%u",
                                from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(seq, ",gid=%u",
                                from_kgid_munged(&init_user_ns, sbinfo->gid));

        /*
         * Showing inode{64,32} might be useful even if it's the system default,
         * since then people don't have to resort to checking both here and
         * /proc/config.gz to confirm 64-bit inums were successfully applied
         * (which may not even exist if IKCONFIG_PROC isn't enabled).
         *
         * We hide it when inode64 isn't the default and we are using 32-bit
         * inodes, since that probably just means the feature isn't even under
         * consideration.
         *
         * As such:
         *
         * +------------------+-----------------+-----------------+
         * |                  | TMPFS_INODE64=y | TMPFS_INODE64=n |
         * +------------------+-----------------+-----------------+
         * | full_inums=true  | show            | show            |
         * | full_inums=false | show            | hide            |
         * +------------------+-----------------+-----------------+
         */
        if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
                seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
        if (sbinfo->huge)
                seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
}

#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        free_percpu(sbinfo->ino_batch);
        percpu_counter_destroy(&sbinfo->used_blocks);
        mpol_put(sbinfo->mpol);
        kfree(sbinfo);
        sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct shmem_options *ctx = fc->fs_private;
        struct inode *inode;
        struct shmem_sb_info *sbinfo;

        /* Round up to L1_CACHE_BYTES to resist false sharing */
        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                                L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;

        sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
        /*
         * By default we allow only half of the physical RAM per
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
        if (!(sb->s_flags & SB_KERNMOUNT)) {
                if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
                        ctx->blocks = shmem_default_max_blocks();
                if (!(ctx->seen & SHMEM_SEEN_INODES))
                        ctx->inodes = shmem_default_max_inodes();
                if (!(ctx->seen & SHMEM_SEEN_INUMS))
                        ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
        } else {
                sb->s_flags |= SB_NOUSER;
        }
        sb->s_export_op = &shmem_export_ops;
        sb->s_flags |= SB_NOSEC;
#else
        sb->s_flags |= SB_NOUSER;
#endif
        sbinfo->max_blocks = ctx->blocks;
        sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
        if (sb->s_flags & SB_KERNMOUNT) {
                sbinfo->ino_batch = alloc_percpu(ino_t);
                if (!sbinfo->ino_batch)
                        goto failed;
        }
        sbinfo->uid = ctx->uid;
        sbinfo->gid = ctx->gid;
        sbinfo->full_inums = ctx->full_inums;
        sbinfo->mode = ctx->mode;
        sbinfo->huge = ctx->huge;
        sbinfo->mpol = ctx->mpol;
        ctx->mpol = NULL;

        raw_spin_lock_init(&sbinfo->stat_lock);
        if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
                goto failed;
        spin_lock_init(&sbinfo->shrinklist_lock);
        INIT_LIST_HEAD(&sbinfo->shrinklist);

        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
        sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_flags |= SB_POSIXACL;
#endif
        uuid_gen(&sb->s_uuid);

        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
                goto failed;
        inode->i_uid = sbinfo->uid;
        inode->i_gid = sbinfo->gid;
        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                goto failed;
        return 0;

failed:
        shmem_put_super(sb);
        return -ENOMEM;
}
static int shmem_get_tree(struct fs_context *fc)
{
        return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
        struct shmem_options *ctx = fc->fs_private;

        if (ctx) {
                mpol_put(ctx->mpol);
                kfree(ctx);
        }
}

static const struct fs_context_operations shmem_fs_context_ops = {
        .free = shmem_free_fc,
        .get_tree = shmem_get_tree,
#ifdef CONFIG_TMPFS
        .parse_monolithic = shmem_parse_options,
        .parse_param = shmem_parse_one,
        .reconfigure = shmem_reconfigure,
#endif
};
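/*
 * These operations serve both the classic mount(2) path (via
 * ->parse_monolithic) and the new mount API; a minimal userspace sketch
 * of the latter (error handling omitted, and older libcs may need raw
 * syscall(2) wrappers for fsopen/fsconfig/fsmount):
 *
 *      int fsfd = fsopen("tmpfs", 0);
 *      fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
 *      fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *      int mntfd = fsmount(fsfd, 0, 0);
 *
 * where each FSCONFIG_SET_STRING call lands in shmem_parse_one() above.
 */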
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *info;
        info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!info)
                return NULL;
        return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
        if (S_ISREG(inode->i_mode))
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}

static void shmem_init_inode(void *foo)
{
        struct shmem_inode_info *info = foo;
        inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

const struct address_space_operations shmem_aops = {
        .writepage = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
        .write_begin = shmem_write_begin,
        .write_end = shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
        .migratepage = migrate_page,
#endif
        .error_remove_page = generic_error_remove_page,
};
EXPORT_SYMBOL(shmem_aops);

static const struct file_operations shmem_file_operations = {
        .mmap = shmem_mmap,
        .get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
        .llseek = shmem_file_llseek,
        .read_iter = shmem_file_read_iter,
        .write_iter = generic_file_write_iter,
        .fsync = noop_fsync,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .fallocate = shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .getattr = shmem_getattr,
        .setattr = shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
        .listxattr = shmem_listxattr,
        .set_acl = simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .create = shmem_create,
        .lookup = simple_lookup,
        .link = shmem_link,
        .unlink = shmem_unlink,
        .symlink = shmem_symlink,
        .mkdir = shmem_mkdir,
        .rmdir = shmem_rmdir,
        .mknod = shmem_mknod,
        .rename = shmem_rename2,
        .tmpfile = shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
        .listxattr = shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr = shmem_setattr,
        .set_acl = simple_set_acl,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
        .listxattr = shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr = shmem_setattr,
        .set_acl = simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode = shmem_alloc_inode,
        .free_inode = shmem_free_in_core_inode,
        .destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs = shmem_statfs,
        .show_options = shmem_show_options,
#endif
        .evict_inode = shmem_evict_inode,
        .drop_inode = generic_delete_inode,
        .put_super = shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        .nr_cached_objects = shmem_unused_huge_count,
        .free_cached_objects = shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
        .fault = shmem_fault,
        .map_pages = filemap_map_pages,
#ifdef CONFIG_NUMA
        .set_policy = shmem_set_policy,
        .get_policy = shmem_get_policy,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
        struct shmem_options *ctx;

        ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->mode = 0777 | S_ISVTX;
        ctx->uid = current_fsuid();
        ctx->gid = current_fsgid();

        fc->fs_private = ctx;
        fc->ops = &shmem_fs_context_ops;
        return 0;
}

static struct file_system_type shmem_fs_type = {
        .owner = THIS_MODULE,
        .name = "tmpfs",
        .init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
        .parameters = shmem_fs_parameters,
#endif
        .kill_sb = kill_litter_super,
        .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
};
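/*
 * This is the filesystem type behind every "mount -t tmpfs" as well as
 * the internal mount created by shmem_init() below: FS_USERNS_MOUNT
 * additionally allows unprivileged user namespaces to mount their own
 * instances, and FS_THP_SUPPORT advertises transparent huge page
 * support to the page cache.
 */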
int __init shmem_init(void)
{
        int error;

        shmem_init_inodecache();

        error = register_filesystem(&shmem_fs_type);
        if (error) {
                pr_err("Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = kern_mount(&shmem_fs_type);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                pr_err("Could not kern_mount tmpfs\n");
                goto out1;
        }

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        else
                shmem_huge = 0; /* just in case it was patched */
#endif
        return 0;

out1:
        unregister_filesystem(&shmem_fs_type);
out2:
        shmem_destroy_inodecache();
        shm_mnt = ERR_PTR(error);
        return error;
}
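/*
 * The attribute defined below is exposed (from mm/huge_memory.c) as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled; for illustration,
 *
 *      echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * arrives in shmem_enabled_store() with buf = "within_size\n".
 */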
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        static const int values[] = {
                SHMEM_HUGE_ALWAYS,
                SHMEM_HUGE_WITHIN_SIZE,
                SHMEM_HUGE_ADVISE,
                SHMEM_HUGE_NEVER,
                SHMEM_HUGE_DENY,
                SHMEM_HUGE_FORCE,
        };
        int len = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(values); i++) {
                len += sysfs_emit_at(buf, len,
                                     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
                                     i ? " " : "",
                                     shmem_format_huge(values[i]));
        }

        len += sysfs_emit_at(buf, len, "\n");

        return len;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        char tmp[16];
        int huge;

        if (count + 1 > sizeof(tmp))
                return -EINVAL;
        memcpy(tmp, buf, count);
        tmp[count] = '\0';
        if (count && tmp[count - 1] == '\n')
                tmp[count - 1] = '\0';

        huge = shmem_parse_huge(tmp);
        if (huge == -EINVAL)
                return -EINVAL;
        if (!has_transparent_hugepage() &&
                        huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
                return -EINVAL;

        shmem_huge = huge;
        if (shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        return count;
}

struct kobj_attribute shmem_enabled_attr =
        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
        .name = "tmpfs",
        .init_fs_context = ramfs_init_fs_context,
        .parameters = ramfs_fs_parameters,
        .kill_sb = kill_litter_super,
        .fs_flags = FS_USERNS_MOUNT,
};
int __init shmem_init(void)
{
        BUG_ON(register_filesystem(&shmem_fs_type) != 0);

        shm_mnt = kern_mount(&shmem_fs_type);
        BUG_ON(IS_ERR(shm_mnt));

        return 0;
}

int shmem_unuse(unsigned int type, bool frontswap,
                unsigned long *fs_pages_to_unuse)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
        return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
                                      unsigned long addr, unsigned long len,
                                      unsigned long pgoff, unsigned long flags)
{
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */
static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
                                       unsigned long flags, unsigned int i_flags)
{
        struct inode *inode;
        struct file *res;

        if (IS_ERR(mnt))
                return ERR_CAST(mnt);

        if (size < 0 || size > MAX_LFS_FILESIZE)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
                                flags);
        if (unlikely(!inode)) {
                shmem_unacct_size(flags, size);
                return ERR_PTR(-ENOSPC);
        }
        inode->i_flags |= i_flags;
        inode->i_size = size;
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
        if (!IS_ERR(res))
                res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
                                &shmem_file_operations);
        if (IS_ERR(res))
                iput(inode);
        return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *      kernel internal. There will be NO LSM permission checks against the
 *      underlying inode. So users of this interface must do LSM checks at a
 *      higher layer. The users are the big_key and shm implementations. LSM
 *      checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
        return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
                                       loff_t size, unsigned long flags)
{
        return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
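/*
 * For illustration, a kernel user creates and sizes a tmpfs-backed file
 * in one call; a minimal sketch (the name and size are examples):
 *
 *      struct file *filp = shmem_file_setup("my-buffer", SZ_1M, 0);
 *
 *      if (IS_ERR(filp))
 *              return PTR_ERR(filp);
 *      ...
 *      fput(filp);
 *
 * drivers/gpu/drm uses this family of helpers for GEM object backing
 * store, and memfd_create(2) is built on the same mechanism.
 */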
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        /*
         * Cloning a new file under mmap_lock leads to a lock ordering conflict
         * between XFS directory reading and selinux: since this file is only
         * accessible to the user through its mapping, use S_PRIVATE flag to
         * bypass file security, in the same way as shmem_kernel_file_setup().
         */
        file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
            (vma->vm_end & HPAGE_PMD_MASK)) {
                khugepaged_enter(vma, vma->vm_flags);
        }

        return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
        struct inode *inode = mapping->host;
        struct page *page;
        int error;

        BUG_ON(!shmem_mapping(mapping));
        error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
                                  gfp, NULL, NULL, NULL);
        if (error)
                page = ERR_PTR(error);
        else
                unlock_page(page);
        return page;
#else
        /*
         * The tiny !SHMEM case uses ramfs without swap
         */
        return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
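/*
 * For illustration, a driver reads page @index of a shmem object while
 * avoiding OOM-killer side effects; a minimal sketch along the lines of
 * the i915 usage mentioned above:
 *
 *      gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *      struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ...
 *      put_page(page);
 *
 * The returned page comes with a reference held, hence the put_page()
 * once the caller is done with it.
 */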