/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
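
/*
 * Editor's note (assuming 4KiB pages): BLOCKS_PER_PAGE == 4096/512 == 8,
 * so inode->i_blocks is maintained below in the 512-byte units that
 * stat(2) reports, while VM_ACCT() converts a byte count into the number
 * of whole pages to charge against the overcommit limit.
 */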

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			      struct folio **foliop, enum sgp_type sgp,
			      gfp_t gfp, struct vm_area_struct *vma,
			      vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
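
/*
 * Worked example for the above (editor's illustration, assuming 4KiB
 * pages): growing a 10000-byte object to 20000 bytes calls
 * shmem_reacct_size() with VM_ACCT(10000) == 3 and VM_ACCT(20000) == 5,
 * so 2 further pages are charged via security_vm_enough_memory_mm();
 * shrinking reverses the difference with vm_unacct_memory().
 */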

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
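
/*
 * Worked example (editor's illustration): with max_blocks == 100 and
 * used_blocks == 98, shmem_inode_acct_block(inode, 4) fails the limit
 * check, since percpu_counter_compare(&used_blocks, 100 - 4) sees
 * 98 > 96; any charge already made against the mm is then returned by
 * shmem_unacct_blocks() before false is reported to the caller.
 */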

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
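
/*
 * Editor's note on the batching above: each CPU hands out inos from a
 * private range it claimed earlier; only when *next_ino reaches a
 * multiple of SHMEM_INO_BATCH (1024) does it take stat_lock to claim a
 * fresh batch from sbinfo->next_ino, so 1023 out of every 1024
 * allocations on a given CPU proceed without the per-sb lock.
 */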

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
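
/*
 * Worked example (editor's illustration): if info->alloced == 10 and
 * info->swapped == 2 but only 5 pages remain in the page cache, then
 * 10 - 2 - 5 == 3 undirtied hole pages were dropped behind our back;
 * shmem_recalc_inode() uncharges those 3 pages and shrinks i_blocks to
 * match.
 */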

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}
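
/*
 * Worked example for SHMEM_HUGE_WITHIN_SIZE (editor's illustration,
 * assuming 4KiB base pages and 2MiB PMD huge pages, so HPAGE_PMD_NR ==
 * 512): a fault at index 100 is rounded up to index 512, and a huge page
 * is used only if round_up(i_size, PAGE_SIZE) covers at least 512 pages,
 * i.e. only if the whole huge folio would lie within i_size.
 */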

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode back to the shrinklist if we failed to lock
		 * the folio at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If the split failed, move the inode back to the shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}
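
/*
 * Editor's note: the two helpers below adapt the routine above to the
 * shrinker API: shmem_unused_huge_count() reports shrinklist_len as the
 * count of shrinkable objects, and shmem_unused_huge_scan() performs the
 * scan, with shmem_unused_huge_shrink()'s return value being the number
 * of huge folios actually split.
 */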

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
		   pgoff_t index, bool shmem_huge_force)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}
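
/*
 * Editor's note: XA_STATE_ORDER above lets a large folio be stored as a
 * single multi-index entry; e.g. an order-9 folio at index 512 covers
 * indices [512, 1024) in i_pages, while nrpages and the NR_FILE_PAGES /
 * NR_SHMEM counters are adjusted by nr == 512 small pages.
 */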

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
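
/*
 * Editor's note: shmem_free_swap() returns 0 on success and -ENOENT when
 * a racing thread already replaced the entry, which is why a caller such
 * as shmem_undo_range() below counts freed entries with:
 *
 *	nr_swaps_freed += !shmem_free_swap(mapping, indices[i], folio);
 */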

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}
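
/*
 * Worked example (editor's illustration): for a vma mapping pages
 * [4, 16) of the object, the partial path above amounts to
 * shmem_partial_swap_usage(mapping, 4, 16), which walks just that window
 * and returns the number of swap entries found, scaled to bytes; the
 * fast path returns swapped << PAGE_SHIFT without any walk when the vma
 * covers the whole object from offset 0.
 */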

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated pages as holes.
	 */
	folio = __filemap_get_folio(inode->i_mapping, index,
					FGP_ENTRY | FGP_LOCK, 0);
	if (!xa_is_value(folio))
		return folio;
	/*
	 * But read a page back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, indices[i], folio)) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);
				truncate_inode_folio(mapping, folio);
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
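
/*
 * Editor's note: a common caller pattern (used by shmem_evict_inode()
 * below) is to drop the entire file contents:
 *
 *	shmem_truncate_range(inode, 0, (loff_t)-1);
 *
 * lend == -1 makes the unsigned end index effectively unbounded, as the
 * "actually very big" comment in shmem_undo_range() notes.
 */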

static int shmem_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	if (info->fsflags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (info->fsflags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (info->fsflags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);
	generic_fillattr(&init_user_ns, inode, stat);

	if (shmem_is_huge(NULL, inode, 0, false))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = info->i_crtime.tv_sec;
		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
	}

	return 0;
}
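
/*
 * Editor's note (an assumption about intent): raising st_blksize to
 * HPAGE_PMD_SIZE (2MiB with 4KiB base pages on x86-64) when huge pages
 * may be used presumably hints userspace to size and align its
 * allocations and I/O to huge page boundaries.
 */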
holebegin, 0, 1); 1126d0424c42SHugh Dickins if (info->alloced) 1127d0424c42SHugh Dickins shmem_truncate_range(inode, 1128d0424c42SHugh Dickins newsize, (loff_t)-1); 112994c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */ 1130d0424c42SHugh Dickins if (oldsize > holebegin) 1131d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1132d0424c42SHugh Dickins holebegin, 0, 1); 113394c1e62dSHugh Dickins } 11341da177e4SLinus Torvalds } 11351da177e4SLinus Torvalds 11362f221d6fSChristian Brauner setattr_copy(&init_user_ns, inode, attr); 1137db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE) 1138138060baSChristian Brauner error = posix_acl_chmod(&init_user_ns, dentry, inode->i_mode); 113936f05cabSJeff Layton if (!error && update_ctime) { 114036f05cabSJeff Layton inode->i_ctime = current_time(inode); 114136f05cabSJeff Layton if (update_mtime) 114236f05cabSJeff Layton inode->i_mtime = inode->i_ctime; 114336f05cabSJeff Layton inode_inc_iversion(inode); 114436f05cabSJeff Layton } 11451da177e4SLinus Torvalds return error; 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 11481f895f75SAl Viro static void shmem_evict_inode(struct inode *inode) 11491da177e4SLinus Torvalds { 11501da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 1151779750d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 11521da177e4SLinus Torvalds 115330e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) { 11541da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size); 11551da177e4SLinus Torvalds inode->i_size = 0; 1156bc786390SHugh Dickins mapping_set_exiting(inode->i_mapping); 11573889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1); 1158779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1159779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1160779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1161779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 1162779750d2SKirill A. Shutemov sbinfo->shrinklist_len--; 1163779750d2SKirill A. Shutemov } 1164779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1165779750d2SKirill A. Shutemov } 1166af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) { 1167af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... 
*/ 1168af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction, 1169af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction)); 1170cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1171af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */ 1172af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction)) 11731da177e4SLinus Torvalds list_del_init(&info->swaplist); 1174cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 11751da177e4SLinus Torvalds } 11763ed47db3SAl Viro } 1177b09e0fa4SEric Paris 117838f38657SAristeu Rozanski simple_xattrs_free(&info->xattrs); 11790f3c42f5SHugh Dickins WARN_ON(inode->i_blocks); 11805b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 1181dbd5768fSJan Kara clear_inode(inode); 11821da177e4SLinus Torvalds } 11831da177e4SLinus Torvalds 1184b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping, 1185da08e9b7SMatthew Wilcox (Oracle) pgoff_t start, struct folio_batch *fbatch, 1186da08e9b7SMatthew Wilcox (Oracle) pgoff_t *indices, unsigned int type) 1187478922e2SMatthew Wilcox { 1188b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start); 1189da08e9b7SMatthew Wilcox (Oracle) struct folio *folio; 119087039546SHugh Dickins swp_entry_t entry; 1191478922e2SMatthew Wilcox 1192478922e2SMatthew Wilcox rcu_read_lock(); 1193da08e9b7SMatthew Wilcox (Oracle) xas_for_each(&xas, folio, ULONG_MAX) { 1194da08e9b7SMatthew Wilcox (Oracle) if (xas_retry(&xas, folio)) 11955b9c98f3SMike Kravetz continue; 1196b56a2d8aSVineeth Remanan Pillai 1197da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1198478922e2SMatthew Wilcox continue; 1199b56a2d8aSVineeth Remanan Pillai 1200da08e9b7SMatthew Wilcox (Oracle) entry = radix_to_swp_entry(folio); 12016cec2b95SMiaohe Lin /* 12026cec2b95SMiaohe Lin * swapin error entries can be found in the mapping. But they're 12036cec2b95SMiaohe Lin * deliberately ignored here as we've done everything we can do. 12046cec2b95SMiaohe Lin */ 120587039546SHugh Dickins if (swp_type(entry) != type) 1206b56a2d8aSVineeth Remanan Pillai continue; 1207b56a2d8aSVineeth Remanan Pillai 1208e384200eSHugh Dickins indices[folio_batch_count(fbatch)] = xas.xa_index; 1209da08e9b7SMatthew Wilcox (Oracle) if (!folio_batch_add(fbatch, folio)) 1210da08e9b7SMatthew Wilcox (Oracle) break; 1211b56a2d8aSVineeth Remanan Pillai 1212b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1213e21a2955SMatthew Wilcox xas_pause(&xas); 1214478922e2SMatthew Wilcox cond_resched_rcu(); 1215478922e2SMatthew Wilcox } 1216b56a2d8aSVineeth Remanan Pillai } 1217478922e2SMatthew Wilcox rcu_read_unlock(); 1218e21a2955SMatthew Wilcox 1219da08e9b7SMatthew Wilcox (Oracle) return xas.xa_index; 1220b56a2d8aSVineeth Remanan Pillai } 1221b56a2d8aSVineeth Remanan Pillai 1222b56a2d8aSVineeth Remanan Pillai /* 1223b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1224b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 
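 *
 * A sketch of the caller's contract, mirroring the loop in
 * shmem_unuse_inode() just below (illustrative, not additional API):
 *
 *	ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
 *	if (ret < 0)
 *		break;	(a negative value such as -ENOMEM ends the scan)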
1225b56a2d8aSVineeth Remanan Pillai */ 1226da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode, 1227da08e9b7SMatthew Wilcox (Oracle) struct folio_batch *fbatch, pgoff_t *indices) 1228b56a2d8aSVineeth Remanan Pillai { 1229b56a2d8aSVineeth Remanan Pillai int i = 0; 1230b56a2d8aSVineeth Remanan Pillai int ret = 0; 1231b56a2d8aSVineeth Remanan Pillai int error = 0; 1232b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1233b56a2d8aSVineeth Remanan Pillai 1234da08e9b7SMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(fbatch); i++) { 1235da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = fbatch->folios[i]; 1236b56a2d8aSVineeth Remanan Pillai 1237da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1238b56a2d8aSVineeth Remanan Pillai continue; 1239da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, indices[i], 1240da08e9b7SMatthew Wilcox (Oracle) &folio, SGP_CACHE, 1241b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1242b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1243b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1244da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1245da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1246b56a2d8aSVineeth Remanan Pillai ret++; 1247b56a2d8aSVineeth Remanan Pillai } 1248b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1249b56a2d8aSVineeth Remanan Pillai break; 1250b56a2d8aSVineeth Remanan Pillai error = 0; 1251b56a2d8aSVineeth Remanan Pillai } 1252b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1253478922e2SMatthew Wilcox } 1254478922e2SMatthew Wilcox 125546f65ec1SHugh Dickins /* 125646f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 125746f65ec1SHugh Dickins */ 125810a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type) 12591da177e4SLinus Torvalds { 1260b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1261b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0; 1262da08e9b7SMatthew Wilcox (Oracle) struct folio_batch fbatch; 1263b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE]; 1264b56a2d8aSVineeth Remanan Pillai int ret = 0; 12651da177e4SLinus Torvalds 1266b56a2d8aSVineeth Remanan Pillai do { 1267da08e9b7SMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 1268da08e9b7SMatthew Wilcox (Oracle) shmem_find_swap_entries(mapping, start, &fbatch, indices, type); 1269da08e9b7SMatthew Wilcox (Oracle) if (folio_batch_count(&fbatch) == 0) { 1270b56a2d8aSVineeth Remanan Pillai ret = 0; 1271778dd893SHugh Dickins break; 1272b56a2d8aSVineeth Remanan Pillai } 1273b56a2d8aSVineeth Remanan Pillai 1274da08e9b7SMatthew Wilcox (Oracle) ret = shmem_unuse_swap_entries(inode, &fbatch, indices); 1275b56a2d8aSVineeth Remanan Pillai if (ret < 0) 1276b56a2d8aSVineeth Remanan Pillai break; 1277b56a2d8aSVineeth Remanan Pillai 1278da08e9b7SMatthew Wilcox (Oracle) start = indices[folio_batch_count(&fbatch) - 1]; 1279b56a2d8aSVineeth Remanan Pillai } while (true); 1280b56a2d8aSVineeth Remanan Pillai 1281b56a2d8aSVineeth Remanan Pillai return ret; 1282b56a2d8aSVineeth Remanan Pillai } 1283b56a2d8aSVineeth Remanan Pillai 1284b56a2d8aSVineeth Remanan Pillai /* 1285b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap 1286b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be 1287b56a2d8aSVineeth Remanan Pillai * unused. 
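 *
 * For context, the expected caller is the swapoff path; as an assumption
 * about mm/swapfile.c rather than anything defined here, try_to_unuse()
 * does roughly:
 *
 *	error = shmem_unuse(type);
 *	if (error)
 *		return error;	(swapoff fails, the device stays in use)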
1288b56a2d8aSVineeth Remanan Pillai */ 128910a9c496SChristoph Hellwig int shmem_unuse(unsigned int type) 1290b56a2d8aSVineeth Remanan Pillai { 1291b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1292b56a2d8aSVineeth Remanan Pillai int error = 0; 1293b56a2d8aSVineeth Remanan Pillai 1294b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1295b56a2d8aSVineeth Remanan Pillai return 0; 1296b56a2d8aSVineeth Remanan Pillai 1297b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1298b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1299b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1300b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1301b56a2d8aSVineeth Remanan Pillai continue; 1302b56a2d8aSVineeth Remanan Pillai } 1303af53d3e9SHugh Dickins /* 1304af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1305af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1306af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1307af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1308af53d3e9SHugh Dickins */ 1309af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1310b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1311b56a2d8aSVineeth Remanan Pillai 131210a9c496SChristoph Hellwig error = shmem_unuse_inode(&info->vfs_inode, type); 1313b56a2d8aSVineeth Remanan Pillai cond_resched(); 1314b56a2d8aSVineeth Remanan Pillai 1315b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1316b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1317b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1318b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1319af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1320af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1321b56a2d8aSVineeth Remanan Pillai if (error) 1322b56a2d8aSVineeth Remanan Pillai break; 13231da177e4SLinus Torvalds } 1324cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1325778dd893SHugh Dickins 1326778dd893SHugh Dickins return error; 13271da177e4SLinus Torvalds } 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds /* 13301da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 13311da177e4SLinus Torvalds */ 13321da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 13331da177e4SLinus Torvalds { 1334e2e3fdc7SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 13351da177e4SLinus Torvalds struct shmem_inode_info *info; 13361da177e4SLinus Torvalds struct address_space *mapping; 13371da177e4SLinus Torvalds struct inode *inode; 13386922c0c7SHugh Dickins swp_entry_t swap; 13396922c0c7SHugh Dickins pgoff_t index; 13401da177e4SLinus Torvalds 13411e6decf3SHugh Dickins /* 13421e6decf3SHugh Dickins * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or 13431e6decf3SHugh Dickins * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, 13441e6decf3SHugh Dickins * and its shmem_writeback() needs them to be split when swapping. 
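 *
 * For instance (an admin-side sketch built on the sysfs path named above,
 * not something this function reads directly):
 *
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * after which such GEM objects arrive here as large folios and must be
 * split before swap-out.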
13451e6decf3SHugh Dickins */ 1346f530ed0eSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 13471e6decf3SHugh Dickins /* Ensure the subpages are still dirty */ 1348f530ed0eSMatthew Wilcox (Oracle) folio_test_set_dirty(folio); 13491e6decf3SHugh Dickins if (split_huge_page(page) < 0) 13501e6decf3SHugh Dickins goto redirty; 1351f530ed0eSMatthew Wilcox (Oracle) folio = page_folio(page); 1352f530ed0eSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 13531e6decf3SHugh Dickins } 13541e6decf3SHugh Dickins 1355f530ed0eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 1356f530ed0eSMatthew Wilcox (Oracle) mapping = folio->mapping; 1357f530ed0eSMatthew Wilcox (Oracle) index = folio->index; 13581da177e4SLinus Torvalds inode = mapping->host; 13591da177e4SLinus Torvalds info = SHMEM_I(inode); 13601da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 13611da177e4SLinus Torvalds goto redirty; 1362d9fe526aSHugh Dickins if (!total_swap_pages) 13631da177e4SLinus Torvalds goto redirty; 13641da177e4SLinus Torvalds 1365d9fe526aSHugh Dickins /* 136697b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 136797b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 136897b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 136997b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 137097b713baSChristoph Hellwig * threads or sync. 1371d9fe526aSHugh Dickins */ 137248f170fbSHugh Dickins if (!wbc->for_reclaim) { 137348f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 137448f170fbSHugh Dickins goto redirty; 137548f170fbSHugh Dickins } 13761635f6a7SHugh Dickins 13771635f6a7SHugh Dickins /* 13781635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13791635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 1380f530ed0eSMatthew Wilcox (Oracle) * fallocated folio arriving here is now to initialize it and write it. 13811aac1400SHugh Dickins * 1382f530ed0eSMatthew Wilcox (Oracle) * That's okay for a folio already fallocated earlier, but if we have 13831aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 1384f530ed0eSMatthew Wilcox (Oracle) * of this folio in case we have to undo it, and (b) it may not be a 13851aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 1386f530ed0eSMatthew Wilcox (Oracle) * reactivate the folio, and let shmem_fallocate() quit when too many. 
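 *
 * An illustrative interleaving of the case handled below (a sketch of
 * the race, not a trace):
 *
 *	shmem_fallocate()		shmem_writepage()
 *	  sets inode->i_private		  reclaim picks a !uptodate folio
 *	  allocates more folios		  sees i_private, nr_unswapped++
 *	  sees nr_unswapped grow	  redirties folio, skips swap-out
 *	  quits early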
13871635f6a7SHugh Dickins */ 1388f530ed0eSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 13891aac1400SHugh Dickins if (inode->i_private) { 13901aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 13911aac1400SHugh Dickins spin_lock(&inode->i_lock); 13921aac1400SHugh Dickins shmem_falloc = inode->i_private; 13931aac1400SHugh Dickins if (shmem_falloc && 13948e205f77SHugh Dickins !shmem_falloc->waitq && 13951aac1400SHugh Dickins index >= shmem_falloc->start && 13961aac1400SHugh Dickins index < shmem_falloc->next) 13971aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 13981aac1400SHugh Dickins else 13991aac1400SHugh Dickins shmem_falloc = NULL; 14001aac1400SHugh Dickins spin_unlock(&inode->i_lock); 14011aac1400SHugh Dickins if (shmem_falloc) 14021aac1400SHugh Dickins goto redirty; 14031aac1400SHugh Dickins } 1404f530ed0eSMatthew Wilcox (Oracle) folio_zero_range(folio, 0, folio_size(folio)); 1405f530ed0eSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 1406f530ed0eSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 14071635f6a7SHugh Dickins } 14081635f6a7SHugh Dickins 1409e2e3fdc7SMatthew Wilcox (Oracle) swap = folio_alloc_swap(folio); 141048f170fbSHugh Dickins if (!swap.val) 141148f170fbSHugh Dickins goto redirty; 1412d9fe526aSHugh Dickins 1413b1dea800SHugh Dickins /* 1414b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 1415f530ed0eSMatthew Wilcox (Oracle) * if it's not already there. Do it now before the folio is 14166922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1417b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 14186922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 14196922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1420b1dea800SHugh Dickins */ 1421b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 142205bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1423b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1424b1dea800SHugh Dickins 1425a4c366f0SMatthew Wilcox (Oracle) if (add_to_swap_cache(folio, swap, 14263852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 14273852f676SJoonsoo Kim NULL) == 0) { 14284595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1429267a4c76SHugh Dickins shmem_recalc_inode(inode); 1430267a4c76SHugh Dickins info->swapped++; 14314595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1432267a4c76SHugh Dickins 1433aaa46865SHugh Dickins swap_shmem_alloc(swap); 14344cd400fdSMatthew Wilcox (Oracle) shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); 14356922c0c7SHugh Dickins 14366922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1437f530ed0eSMatthew Wilcox (Oracle) BUG_ON(folio_mapped(folio)); 1438f530ed0eSMatthew Wilcox (Oracle) swap_writepage(&folio->page, wbc); 14391da177e4SLinus Torvalds return 0; 14401da177e4SLinus Torvalds } 14411da177e4SLinus Torvalds 14426922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 14434081f744SMatthew Wilcox (Oracle) put_swap_folio(folio, swap); 14441da177e4SLinus Torvalds redirty: 1445f530ed0eSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1446d9fe526aSHugh Dickins if (wbc->for_reclaim) 1447f530ed0eSMatthew Wilcox (Oracle) return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ 1448f530ed0eSMatthew Wilcox (Oracle) folio_unlock(folio); 1449d9fe526aSHugh Dickins return 0; 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds 145275edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 145371fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1454680d794bSakpm@linux-foundation.org { 1455680d794bSakpm@linux-foundation.org char buffer[64]; 1456680d794bSakpm@linux-foundation.org 145771fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1458095f1fc4SLee Schermerhorn return; /* show nothing */ 1459095f1fc4SLee Schermerhorn 1460a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1461095f1fc4SLee Schermerhorn 1462095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1463680d794bSakpm@linux-foundation.org } 146471fe804bSLee Schermerhorn 146571fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 146671fe804bSLee Schermerhorn { 146771fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 146871fe804bSLee Schermerhorn if (sbinfo->mpol) { 1469bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 147071fe804bSLee Schermerhorn mpol = sbinfo->mpol; 147171fe804bSLee Schermerhorn mpol_get(mpol); 1472bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 147371fe804bSLee Schermerhorn } 147471fe804bSLee Schermerhorn return mpol; 147571fe804bSLee Schermerhorn } 147675edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 147775edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 147875edd345SHugh Dickins { 147975edd345SHugh Dickins } 148075edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 148175edd345SHugh Dickins { 148275edd345SHugh Dickins return NULL; 148375edd345SHugh Dickins } 148475edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 148575edd345SHugh Dickins #ifndef CONFIG_NUMA 148675edd345SHugh Dickins #define vm_policy vm_private_data 148775edd345SHugh Dickins #endif 1488680d794bSakpm@linux-foundation.org 1489800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1490800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1491800d8c63SKirill A. Shutemov { 1492800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 14932c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1494800d8c63SKirill A. 
Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1495800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1496800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1497800d8c63SKirill A. Shutemov } 1498800d8c63SKirill A. Shutemov 1499800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1500800d8c63SKirill A. Shutemov { 1501800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1502800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1503800d8c63SKirill A. Shutemov } 1504800d8c63SKirill A. Shutemov 15055739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, 150641ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 15071da177e4SLinus Torvalds { 15081da177e4SLinus Torvalds struct vm_area_struct pvma; 150918a2f371SMel Gorman struct page *page; 15108c63ca5bSWill Deacon struct vm_fault vmf = { 15118c63ca5bSWill Deacon .vma = &pvma, 15128c63ca5bSWill Deacon }; 15131da177e4SLinus Torvalds 1514800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1515e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1516800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 151718a2f371SMel Gorman 15185739a81cSMatthew Wilcox (Oracle) if (!page) 15195739a81cSMatthew Wilcox (Oracle) return NULL; 15205739a81cSMatthew Wilcox (Oracle) return page_folio(page); 1521800d8c63SKirill A. Shutemov } 152218a2f371SMel Gorman 152378cc8cdcSRik van Riel /* 152478cc8cdcSRik van Riel * Make sure huge_gfp is always more limited than limit_gfp. 152578cc8cdcSRik van Riel * Some of the flags set permissions, while others set limitations. 152678cc8cdcSRik van Riel */ 152778cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 152878cc8cdcSRik van Riel { 152978cc8cdcSRik van Riel gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 153078cc8cdcSRik van Riel gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1531187df5ddSRik van Riel gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1532187df5ddSRik van Riel gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1533187df5ddSRik van Riel 1534187df5ddSRik van Riel /* Allow allocations only from the originally specified zones. */ 1535187df5ddSRik van Riel result |= zoneflags; 153678cc8cdcSRik van Riel 153778cc8cdcSRik van Riel /* 153878cc8cdcSRik van Riel * Minimize the result gfp by taking the union with the deny flags, 153978cc8cdcSRik van Riel * and the intersection of the allow flags. 154078cc8cdcSRik van Riel */ 154178cc8cdcSRik van Riel result |= (limit_gfp & denyflags); 154278cc8cdcSRik van Riel result |= (huge_gfp & limit_gfp) & allowflags; 154378cc8cdcSRik van Riel 154478cc8cdcSRik van Riel return result; 154578cc8cdcSRik van Riel } 154678cc8cdcSRik van Riel 154772827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp, 1548800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1549800d8c63SKirill A. Shutemov { 1550800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 15517b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 15527b8d046fSMatthew Wilcox pgoff_t hindex; 1553dfe98499SMatthew Wilcox (Oracle) struct folio *folio; 1554800d8c63SKirill A. 
Shutemov 15554620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 15567b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 15577b8d046fSMatthew Wilcox XA_PRESENT)) 1558800d8c63SKirill A. Shutemov return NULL; 1559800d8c63SKirill A. Shutemov 1560800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1561dfe98499SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); 1562800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1563dfe98499SMatthew Wilcox (Oracle) if (!folio) 1564dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK); 156572827e5cSMatthew Wilcox (Oracle) return folio; 156618a2f371SMel Gorman } 156718a2f371SMel Gorman 15680c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp, 156918a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 157018a2f371SMel Gorman { 157118a2f371SMel Gorman struct vm_area_struct pvma; 15720c023ef5SMatthew Wilcox (Oracle) struct folio *folio; 157318a2f371SMel Gorman 1574800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 15750c023ef5SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); 1576800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 157718a2f371SMel Gorman 15780c023ef5SMatthew Wilcox (Oracle) return folio; 157918a2f371SMel Gorman } 158018a2f371SMel Gorman 1581b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, 1582800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1583800d8c63SKirill A. Shutemov { 15840f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 158572827e5cSMatthew Wilcox (Oracle) struct folio *folio; 1586800d8c63SKirill A. Shutemov int nr; 1587800d8c63SKirill A. Shutemov int err = -ENOSPC; 1588800d8c63SKirill A. Shutemov 1589396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1590800d8c63SKirill A. Shutemov huge = false; 1591800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1592800d8c63SKirill A. Shutemov 15930f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1594800d8c63SKirill A. Shutemov goto failed; 1595800d8c63SKirill A. Shutemov 1596800d8c63SKirill A. Shutemov if (huge) 159772827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_hugefolio(gfp, info, index); 1598800d8c63SKirill A. Shutemov else 159972827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, index); 160072827e5cSMatthew Wilcox (Oracle) if (folio) { 160172827e5cSMatthew Wilcox (Oracle) __folio_set_locked(folio); 160272827e5cSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 1603b1d0ec3aSMatthew Wilcox (Oracle) return folio; 160475edd345SHugh Dickins } 160518a2f371SMel Gorman 1606800d8c63SKirill A. Shutemov err = -ENOMEM; 16070f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1608800d8c63SKirill A. Shutemov failed: 1609800d8c63SKirill A. Shutemov return ERR_PTR(err); 16101da177e4SLinus Torvalds } 161171fe804bSLee Schermerhorn 16121da177e4SLinus Torvalds /* 1613bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1614fc26babbSMatthew Wilcox (Oracle) * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of 1615bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1616bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. 
If that mapping has special 1617bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1618bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1619bde05d1cSHugh Dickins * 1620bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1621bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1622bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 1623bde05d1cSHugh Dickins */ 1624069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) 1625bde05d1cSHugh Dickins { 1626069d849cSMatthew Wilcox (Oracle) return folio_zonenum(folio) > gfp_zone(gfp); 1627bde05d1cSHugh Dickins } 1628bde05d1cSHugh Dickins 16290d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, 1630bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1631bde05d1cSHugh Dickins { 1632d21bba2bSMatthew Wilcox (Oracle) struct folio *old, *new; 1633bde05d1cSHugh Dickins struct address_space *swap_mapping; 1634c1cb20d4SYu Zhao swp_entry_t entry; 1635bde05d1cSHugh Dickins pgoff_t swap_index; 1636bde05d1cSHugh Dickins int error; 1637bde05d1cSHugh Dickins 16380d698e25SMatthew Wilcox (Oracle) old = *foliop; 1639907ea17eSMatthew Wilcox (Oracle) entry = folio_swap_entry(old); 1640c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1641907ea17eSMatthew Wilcox (Oracle) swap_mapping = swap_address_space(entry); 1642bde05d1cSHugh Dickins 1643bde05d1cSHugh Dickins /* 1644bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1645bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1646bde05d1cSHugh Dickins */ 1647bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1648907ea17eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(old), old); 1649907ea17eSMatthew Wilcox (Oracle) new = shmem_alloc_folio(gfp, info, index); 1650907ea17eSMatthew Wilcox (Oracle) if (!new) 1651bde05d1cSHugh Dickins return -ENOMEM; 1652bde05d1cSHugh Dickins 1653907ea17eSMatthew Wilcox (Oracle) folio_get(new); 1654907ea17eSMatthew Wilcox (Oracle) folio_copy(new, old); 1655907ea17eSMatthew Wilcox (Oracle) flush_dcache_folio(new); 1656bde05d1cSHugh Dickins 1657907ea17eSMatthew Wilcox (Oracle) __folio_set_locked(new); 1658907ea17eSMatthew Wilcox (Oracle) __folio_set_swapbacked(new); 1659907ea17eSMatthew Wilcox (Oracle) folio_mark_uptodate(new); 1660907ea17eSMatthew Wilcox (Oracle) folio_set_swap_entry(new, entry); 1661907ea17eSMatthew Wilcox (Oracle) folio_set_swapcache(new); 1662bde05d1cSHugh Dickins 1663bde05d1cSHugh Dickins /* 1664bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1665bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 
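 *
 * ("oldpage" and "newpage" predate the folio conversion: they are the
 * 'old' and 'new' folios below, exchanged in place at swap_index.)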
1666bde05d1cSHugh Dickins */ 1667b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 1668907ea17eSMatthew Wilcox (Oracle) error = shmem_replace_entry(swap_mapping, swap_index, old, new); 16690142ef6cSHugh Dickins if (!error) { 1670d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(old, new); 1671907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1672907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1673907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); 1674907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_SHMEM, -1); 16750142ef6cSHugh Dickins } 1676b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1677bde05d1cSHugh Dickins 16780142ef6cSHugh Dickins if (unlikely(error)) { 16790142ef6cSHugh Dickins /* 16800142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 16810142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 16820142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 16830142ef6cSHugh Dickins */ 1684907ea17eSMatthew Wilcox (Oracle) old = new; 16850142ef6cSHugh Dickins } else { 1686907ea17eSMatthew Wilcox (Oracle) folio_add_lru(new); 16870d698e25SMatthew Wilcox (Oracle) *foliop = new; 16880142ef6cSHugh Dickins } 1689bde05d1cSHugh Dickins 1690907ea17eSMatthew Wilcox (Oracle) folio_clear_swapcache(old); 1691907ea17eSMatthew Wilcox (Oracle) old->private = NULL; 1692bde05d1cSHugh Dickins 1693907ea17eSMatthew Wilcox (Oracle) folio_unlock(old); 1694907ea17eSMatthew Wilcox (Oracle) folio_put_refs(old, 2); 16950142ef6cSHugh Dickins return error; 1696bde05d1cSHugh Dickins } 1697bde05d1cSHugh Dickins 16986cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, 16996cec2b95SMiaohe Lin struct folio *folio, swp_entry_t swap) 17006cec2b95SMiaohe Lin { 17016cec2b95SMiaohe Lin struct address_space *mapping = inode->i_mapping; 17026cec2b95SMiaohe Lin struct shmem_inode_info *info = SHMEM_I(inode); 17036cec2b95SMiaohe Lin swp_entry_t swapin_error; 17046cec2b95SMiaohe Lin void *old; 17056cec2b95SMiaohe Lin 170615520a3fSPeter Xu swapin_error = make_swapin_error_entry(); 17076cec2b95SMiaohe Lin old = xa_cmpxchg_irq(&mapping->i_pages, index, 17086cec2b95SMiaohe Lin swp_to_radix_entry(swap), 17096cec2b95SMiaohe Lin swp_to_radix_entry(swapin_error), 0); 17106cec2b95SMiaohe Lin if (old != swp_to_radix_entry(swap)) 17116cec2b95SMiaohe Lin return; 17126cec2b95SMiaohe Lin 17136cec2b95SMiaohe Lin folio_wait_writeback(folio); 171475fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 17156cec2b95SMiaohe Lin spin_lock_irq(&info->lock); 17166cec2b95SMiaohe Lin /* 17176cec2b95SMiaohe Lin * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't 17186cec2b95SMiaohe Lin * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in 17196cec2b95SMiaohe Lin * shmem_evict_inode. 17206cec2b95SMiaohe Lin */ 17216cec2b95SMiaohe Lin info->alloced--; 17226cec2b95SMiaohe Lin info->swapped--; 17236cec2b95SMiaohe Lin shmem_recalc_inode(inode); 17246cec2b95SMiaohe Lin spin_unlock_irq(&info->lock); 17256cec2b95SMiaohe Lin swap_free(swap); 17266cec2b95SMiaohe Lin } 17276cec2b95SMiaohe Lin 1728bde05d1cSHugh Dickins /* 1729833de10fSMiaohe Lin * Swap in the folio pointed to by *foliop. 1730833de10fSMiaohe Lin * Caller has to make sure that *foliop contains a valid swapped folio. 1731833de10fSMiaohe Lin * Returns 0 and the folio in foliop if success. 
On failure, returns the 1732833de10fSMiaohe Lin * error code and NULL in *foliop. 17331da177e4SLinus Torvalds */ 1734da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 1735da08e9b7SMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, 1736c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 17372b740303SSouptick Joarder vm_fault_t *fault_type) 17381da177e4SLinus Torvalds { 17391da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 174023f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 174104f94e3fSDan Schatzberg struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; 1742*cbc2bd98SKairui Song struct swap_info_struct *si; 1743da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = NULL; 17441da177e4SLinus Torvalds swp_entry_t swap; 17451da177e4SLinus Torvalds int error; 17461da177e4SLinus Torvalds 1747da08e9b7SMatthew Wilcox (Oracle) VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 1748da08e9b7SMatthew Wilcox (Oracle) swap = radix_to_swp_entry(*foliop); 1749da08e9b7SMatthew Wilcox (Oracle) *foliop = NULL; 175054af6042SHugh Dickins 17516cec2b95SMiaohe Lin if (is_swapin_error_entry(swap)) 17526cec2b95SMiaohe Lin return -EIO; 17536cec2b95SMiaohe Lin 1754*cbc2bd98SKairui Song si = get_swap_device(swap); 1755*cbc2bd98SKairui Song if (!si) { 1756*cbc2bd98SKairui Song if (!shmem_confirm_swap(mapping, index, swap)) 1757*cbc2bd98SKairui Song return -EEXIST; 1758*cbc2bd98SKairui Song else 1759*cbc2bd98SKairui Song return -EINVAL; 1760*cbc2bd98SKairui Song } 1761*cbc2bd98SKairui Song 17621da177e4SLinus Torvalds /* Look it up and read it in.. */ 17635739a81cSMatthew Wilcox (Oracle) folio = swap_cache_get_folio(swap, NULL, 0); 17645739a81cSMatthew Wilcox (Oracle) if (!folio) { 17659e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 17669e18eb29SAndres Lagar-Cavilla if (fault_type) { 176768da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 17689e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 17692262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 17709e18eb29SAndres Lagar-Cavilla } 17719e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 17725739a81cSMatthew Wilcox (Oracle) folio = shmem_swapin(swap, gfp, info, index); 17735739a81cSMatthew Wilcox (Oracle) if (!folio) { 17741da177e4SLinus Torvalds error = -ENOMEM; 177554af6042SHugh Dickins goto failed; 1776285b2c4fSHugh Dickins } 17771da177e4SLinus Torvalds } 17781da177e4SLinus Torvalds 1779833de10fSMiaohe Lin /* We have to do this with folio locked to prevent races */ 1780da08e9b7SMatthew Wilcox (Oracle) folio_lock(folio); 1781da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_swapcache(folio) || 1782da08e9b7SMatthew Wilcox (Oracle) folio_swap_entry(folio).val != swap.val || 1783d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1784c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1785d1899228SHugh Dickins goto unlock; 1786bde05d1cSHugh Dickins } 1787da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 17881da177e4SLinus Torvalds error = -EIO; 178954af6042SHugh Dickins goto failed; 179054af6042SHugh Dickins } 1791da08e9b7SMatthew Wilcox (Oracle) folio_wait_writeback(folio); 179254af6042SHugh Dickins 17938a84802eSSteven Price /* 17948a84802eSSteven Price * Some architectures may have to restore extra metadata to the 1795da08e9b7SMatthew Wilcox (Oracle) * folio after reading from swap. 
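 *
 * (One concrete case, stated as an assumption about the arch hook rather
 * than anything visible here: arm64 uses it to restore MTE tags that were
 * saved when the page was swapped out.)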
17968a84802eSSteven Price */ 1797da08e9b7SMatthew Wilcox (Oracle) arch_swap_restore(swap, folio); 17988a84802eSSteven Price 1799069d849cSMatthew Wilcox (Oracle) if (shmem_should_replace_folio(folio, gfp)) { 18000d698e25SMatthew Wilcox (Oracle) error = shmem_replace_folio(&folio, gfp, info, index); 1801bde05d1cSHugh Dickins if (error) 180254af6042SHugh Dickins goto failed; 18031da177e4SLinus Torvalds } 18041da177e4SLinus Torvalds 1805b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, index, 18063fea5a49SJohannes Weiner swp_to_radix_entry(swap), gfp, 18073fea5a49SJohannes Weiner charge_mm); 180854af6042SHugh Dickins if (error) 180954af6042SHugh Dickins goto failed; 181054af6042SHugh Dickins 18114595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 181254af6042SHugh Dickins info->swapped--; 181354af6042SHugh Dickins shmem_recalc_inode(inode); 18144595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 181527ab7006SHugh Dickins 181666d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1817da08e9b7SMatthew Wilcox (Oracle) folio_mark_accessed(folio); 181866d2f4d2SHugh Dickins 181975fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 1820da08e9b7SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 182127ab7006SHugh Dickins swap_free(swap); 1822*cbc2bd98SKairui Song put_swap_device(si); 182327ab7006SHugh Dickins 1824da08e9b7SMatthew Wilcox (Oracle) *foliop = folio; 1825c5bf121eSVineeth Remanan Pillai return 0; 1826c5bf121eSVineeth Remanan Pillai failed: 1827c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap)) 1828c5bf121eSVineeth Remanan Pillai error = -EEXIST; 18296cec2b95SMiaohe Lin if (error == -EIO) 18306cec2b95SMiaohe Lin shmem_set_folio_swapin_error(inode, index, folio, swap); 1831c5bf121eSVineeth Remanan Pillai unlock: 1832da08e9b7SMatthew Wilcox (Oracle) if (folio) { 1833da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1834da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1835c5bf121eSVineeth Remanan Pillai } 1836*cbc2bd98SKairui Song put_swap_device(si); 1837c5bf121eSVineeth Remanan Pillai 1838c5bf121eSVineeth Remanan Pillai return error; 1839c5bf121eSVineeth Remanan Pillai } 1840c5bf121eSVineeth Remanan Pillai 1841c5bf121eSVineeth Remanan Pillai /* 1842fc26babbSMatthew Wilcox (Oracle) * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate 1843c5bf121eSVineeth Remanan Pillai * 1844c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the 1845c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap 1846c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache. 1847c5bf121eSVineeth Remanan Pillai * 1848c949b097SAxel Rasmussen * vma, vmf, and fault_type are only supplied by shmem_fault: 1849c5bf121eSVineeth Remanan Pillai * otherwise they are NULL. 
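 *
 * Typical in-kernel use goes through the shmem_get_folio() wrapper defined
 * further down; on success the folio comes back locked (a sketch using
 * only names from this file):
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *	if (err)
 *		return err;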
1850c5bf121eSVineeth Remanan Pillai */ 1851fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, 1852fc26babbSMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 1853c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1854c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1855c5bf121eSVineeth Remanan Pillai { 1856c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1857c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1858c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1859c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1860b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio; 18616fe7d712SLukas Bulwahn pgoff_t hindex; 1862164cc4feSRik van Riel gfp_t huge_gfp; 1863c5bf121eSVineeth Remanan Pillai int error; 1864c5bf121eSVineeth Remanan Pillai int once = 0; 1865c5bf121eSVineeth Remanan Pillai int alloced = 0; 1866c5bf121eSVineeth Remanan Pillai 1867c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1868c5bf121eSVineeth Remanan Pillai return -EFBIG; 1869c5bf121eSVineeth Remanan Pillai repeat: 1870c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1871c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1872c5bf121eSVineeth Remanan Pillai return -EINVAL; 1873c5bf121eSVineeth Remanan Pillai } 1874c5bf121eSVineeth Remanan Pillai 1875c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 187604f94e3fSDan Schatzberg charge_mm = vma ? vma->vm_mm : NULL; 1877c5bf121eSVineeth Remanan Pillai 1878b1d0ec3aSMatthew Wilcox (Oracle) folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0); 1879b1d0ec3aSMatthew Wilcox (Oracle) if (folio && vma && userfaultfd_minor(vma)) { 1880b1d0ec3aSMatthew Wilcox (Oracle) if (!xa_is_value(folio)) { 1881b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 1882b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 1883c949b097SAxel Rasmussen } 1884c949b097SAxel Rasmussen *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 1885c949b097SAxel Rasmussen return 0; 1886c949b097SAxel Rasmussen } 1887c949b097SAxel Rasmussen 1888b1d0ec3aSMatthew Wilcox (Oracle) if (xa_is_value(folio)) { 1889da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, index, &folio, 1890c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1891c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1892c5bf121eSVineeth Remanan Pillai goto repeat; 1893c5bf121eSVineeth Remanan Pillai 1894fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 1895c5bf121eSVineeth Remanan Pillai return error; 1896c5bf121eSVineeth Remanan Pillai } 1897c5bf121eSVineeth Remanan Pillai 1898b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 1899acdd9f8eSHugh Dickins if (sgp == SGP_WRITE) 1900b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 1901b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 1902acdd9f8eSHugh Dickins goto out; 1903fc26babbSMatthew Wilcox (Oracle) /* fallocated folio */ 1904c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 1905c5bf121eSVineeth Remanan Pillai goto clear; 1906b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 1907b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 1908c5bf121eSVineeth Remanan Pillai } 1909c5bf121eSVineeth Remanan Pillai 1910c5bf121eSVineeth Remanan Pillai /* 1911fc26babbSMatthew Wilcox (Oracle) * SGP_READ: succeed on hole, with NULL folio, letting caller zero. 
1912fc26babbSMatthew Wilcox (Oracle) * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 1913acdd9f8eSHugh Dickins */ 1914fc26babbSMatthew Wilcox (Oracle) *foliop = NULL; 1915acdd9f8eSHugh Dickins if (sgp == SGP_READ) 1916acdd9f8eSHugh Dickins return 0; 1917acdd9f8eSHugh Dickins if (sgp == SGP_NOALLOC) 1918acdd9f8eSHugh Dickins return -ENOENT; 1919acdd9f8eSHugh Dickins 1920acdd9f8eSHugh Dickins /* 1921acdd9f8eSHugh Dickins * Fast cache lookup and swap lookup did not find it: allocate. 1922c5bf121eSVineeth Remanan Pillai */ 1923c5bf121eSVineeth Remanan Pillai 1924cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1925cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1926cfda0526SMike Rapoport return 0; 1927cfda0526SMike Rapoport } 1928cfda0526SMike Rapoport 19297c6c6cc4SZach O'Keefe if (!shmem_is_huge(vma, inode, index, false)) 1930800d8c63SKirill A. Shutemov goto alloc_nohuge; 193127d80fa2SKees Cook 1932164cc4feSRik van Riel huge_gfp = vma_thp_gfp_mask(vma); 193378cc8cdcSRik van Riel huge_gfp = limit_gfp_mask(huge_gfp, gfp); 1934b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); 1935b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 1936c5bf121eSVineeth Remanan Pillai alloc_nohuge: 1937b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); 193854af6042SHugh Dickins } 1939b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 1940779750d2SKirill A. Shutemov int retry = 5; 1941c5bf121eSVineeth Remanan Pillai 1942b1d0ec3aSMatthew Wilcox (Oracle) error = PTR_ERR(folio); 1943b1d0ec3aSMatthew Wilcox (Oracle) folio = NULL; 1944779750d2SKirill A. Shutemov if (error != -ENOSPC) 1945c5bf121eSVineeth Remanan Pillai goto unlock; 1946779750d2SKirill A. Shutemov /* 1947fc26babbSMatthew Wilcox (Oracle) * Try to reclaim some space by splitting a large folio 1948779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1949779750d2SKirill A. Shutemov */ 1950779750d2SKirill A. Shutemov while (retry--) { 1951779750d2SKirill A. Shutemov int ret; 1952c5bf121eSVineeth Remanan Pillai 1953779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1954779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1955779750d2SKirill A. Shutemov break; 1956779750d2SKirill A. Shutemov if (ret) 1957779750d2SKirill A. Shutemov goto alloc_nohuge; 1958779750d2SKirill A. Shutemov } 1959c5bf121eSVineeth Remanan Pillai goto unlock; 1960800d8c63SKirill A. Shutemov } 1961800d8c63SKirill A. Shutemov 1962b1d0ec3aSMatthew Wilcox (Oracle) hindex = round_down(index, folio_nr_pages(folio)); 1963800d8c63SKirill A. Shutemov 196466d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1965b1d0ec3aSMatthew Wilcox (Oracle) __folio_set_referenced(folio); 196666d2f4d2SHugh Dickins 1967b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, hindex, 19683fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK, 19693fea5a49SJohannes Weiner charge_mm); 19703fea5a49SJohannes Weiner if (error) 1971800d8c63SKirill A. Shutemov goto unacct; 1972b1d0ec3aSMatthew Wilcox (Oracle) folio_add_lru(folio); 197354af6042SHugh Dickins 19744595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1975b1d0ec3aSMatthew Wilcox (Oracle) info->alloced += folio_nr_pages(folio); 1976fa020a2bSAndrew Morton inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio); 197754af6042SHugh Dickins shmem_recalc_inode(inode); 19784595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 19791635f6a7SHugh Dickins alloced = true; 198054af6042SHugh Dickins 1981b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_pmd_mappable(folio) && 1982779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1983fc26babbSMatthew Wilcox (Oracle) folio_next_index(folio) - 1) { 1984779750d2SKirill A. Shutemov /* 1985fc26babbSMatthew Wilcox (Oracle) * Part of the large folio is beyond i_size: subject 1986779750d2SKirill A. Shutemov * to shrink under memory pressure. 1987779750d2SKirill A. Shutemov */ 1988779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1989d041353dSCong Wang /* 1990d041353dSCong Wang * _careful to defend against unlocked access to 1991d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1992d041353dSCong Wang */ 1993d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1994779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1995779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1996779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1997779750d2SKirill A. Shutemov } 1998779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1999779750d2SKirill A. Shutemov } 2000779750d2SKirill A. Shutemov 2001ec9516fbSHugh Dickins /* 2002fc26babbSMatthew Wilcox (Oracle) * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 20031635f6a7SHugh Dickins */ 20041635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 20051635f6a7SHugh Dickins sgp = SGP_WRITE; 20061635f6a7SHugh Dickins clear: 20071635f6a7SHugh Dickins /* 2008fc26babbSMatthew Wilcox (Oracle) * Let SGP_WRITE caller clear ends if write does not fill folio; 2009fc26babbSMatthew Wilcox (Oracle) * but SGP_FALLOC on a folio fallocated earlier must initialize 20101635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 2011ec9516fbSHugh Dickins */ 2012b1d0ec3aSMatthew Wilcox (Oracle) if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { 2013b1d0ec3aSMatthew Wilcox (Oracle) long i, n = folio_nr_pages(folio); 2014800d8c63SKirill A. Shutemov 2015b1d0ec3aSMatthew Wilcox (Oracle) for (i = 0; i < n; i++) 2016b1d0ec3aSMatthew Wilcox (Oracle) clear_highpage(folio_page(folio, i)); 2017b1d0ec3aSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 2018b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 2019ec9516fbSHugh Dickins } 2020bde05d1cSHugh Dickins 202154af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 202275edd345SHugh Dickins if (sgp <= SGP_CACHE && 202309cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2024267a4c76SHugh Dickins if (alloced) { 2025b1d0ec3aSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 2026b1d0ec3aSMatthew Wilcox (Oracle) filemap_remove_folio(folio); 20274595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 2028267a4c76SHugh Dickins shmem_recalc_inode(inode); 20294595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 2030267a4c76SHugh Dickins } 203154af6042SHugh Dickins error = -EINVAL; 2032267a4c76SHugh Dickins goto unlock; 2033ff36b801SShaohua Li } 203463ec1973SMatthew Wilcox (Oracle) out: 2035fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 203654af6042SHugh Dickins return 0; 2037d00806b1SNick Piggin 2038d0217ac0SNick Piggin /* 203954af6042SHugh Dickins * Error recovery. 20401da177e4SLinus Torvalds */ 204154af6042SHugh Dickins unacct: 2042b1d0ec3aSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); 2043800d8c63SKirill A. 
Shutemov 2044b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 2045b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2046b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 2047800d8c63SKirill A. Shutemov goto alloc_nohuge; 2048800d8c63SKirill A. Shutemov } 2049d1899228SHugh Dickins unlock: 2050b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 2051b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2052b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 205354af6042SHugh Dickins } 205454af6042SHugh Dickins if (error == -ENOSPC && !once++) { 20554595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 205654af6042SHugh Dickins shmem_recalc_inode(inode); 20574595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 20581da177e4SLinus Torvalds goto repeat; 2059d8dc74f2SAdrian Bunk } 20607f4446eeSMatthew Wilcox if (error == -EEXIST) 206154af6042SHugh Dickins goto repeat; 206254af6042SHugh Dickins return error; 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds 20654e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, 20664e1fc793SMatthew Wilcox (Oracle) enum sgp_type sgp) 20674e1fc793SMatthew Wilcox (Oracle) { 20684e1fc793SMatthew Wilcox (Oracle) return shmem_get_folio_gfp(inode, index, foliop, sgp, 20694e1fc793SMatthew Wilcox (Oracle) mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); 20704e1fc793SMatthew Wilcox (Oracle) } 20714e1fc793SMatthew Wilcox (Oracle) 207210d20bd2SLinus Torvalds /* 207310d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 207410d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 207510d20bd2SLinus Torvalds * target. 207610d20bd2SLinus Torvalds */ 2077ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 207810d20bd2SLinus Torvalds { 207910d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 20802055da97SIngo Molnar list_del_init(&wait->entry); 208110d20bd2SLinus Torvalds return ret; 208210d20bd2SLinus Torvalds } 208310d20bd2SLinus Torvalds 208420acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 20851da177e4SLinus Torvalds { 208611bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 2087496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 20889e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 208968a54100SMatthew Wilcox (Oracle) struct folio *folio = NULL; 209020acce67SSouptick Joarder int err; 209120acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 20921da177e4SLinus Torvalds 2093f00cdc6dSHugh Dickins /* 2094f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2095f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 20969608703eSJan Kara * locks writers out with its hold on i_rwsem. So refrain from 20978e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 20988e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 20998e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 21008e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 
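 *
 * A sketched interleaving of that livelock (illustrative only):
 *
 *	shmem_fallocate(PUNCH_HOLE)	concurrent faulters
 *	  unmap_mapping_range()
 *	  shmem_undo_range()		  fault pages back into the hole
 *	  must unmap each again		  fault yet more pages back ...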
21018e205f77SHugh Dickins * 21028e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 21038e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 21048e205f77SHugh Dickins * we just need to make racing faults a rare case. 21058e205f77SHugh Dickins * 21068e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 21079608703eSJan Kara * standard mutex or completion: but we cannot take i_rwsem in fault, 21088e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 2109f00cdc6dSHugh Dickins */ 2110f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2111f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2112f00cdc6dSHugh Dickins 2113f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2114f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 21158e205f77SHugh Dickins if (shmem_falloc && 21168e205f77SHugh Dickins shmem_falloc->waitq && 21178e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 21188e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 21198897c1b1SKirill A. Shutemov struct file *fpin; 21208e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 212110d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 21228e205f77SHugh Dickins 21238e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 21248897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL); 21258897c1b1SKirill A. Shutemov if (fpin) 21268e205f77SHugh Dickins ret = VM_FAULT_RETRY; 21278e205f77SHugh Dickins 21288e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 21298e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 21308e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 21318e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21328e205f77SHugh Dickins schedule(); 21338e205f77SHugh Dickins 21348e205f77SHugh Dickins /* 21358e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 21368e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 21378e205f77SHugh Dickins * is usually invalid by the time we reach here, but 21388e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 21398e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 21408e205f77SHugh Dickins */ 21418e205f77SHugh Dickins spin_lock(&inode->i_lock); 21428e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 21438e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21448897c1b1SKirill A. Shutemov 21458897c1b1SKirill A. Shutemov if (fpin) 21468897c1b1SKirill A. 
Shutemov fput(fpin); 21478e205f77SHugh Dickins return ret; 2148f00cdc6dSHugh Dickins } 21498e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2150f00cdc6dSHugh Dickins } 2151f00cdc6dSHugh Dickins 215268a54100SMatthew Wilcox (Oracle) err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, 2153cfda0526SMike Rapoport gfp, vma, vmf, &ret); 215420acce67SSouptick Joarder if (err) 215520acce67SSouptick Joarder return vmf_error(err); 215668a54100SMatthew Wilcox (Oracle) if (folio) 215768a54100SMatthew Wilcox (Oracle) vmf->page = folio_file_page(folio, vmf->pgoff); 215868da9f05SHugh Dickins return ret; 21591da177e4SLinus Torvalds } 21601da177e4SLinus Torvalds 2161c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2162c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2163c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2164c01d5b30SHugh Dickins { 2165c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2166c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2167c01d5b30SHugh Dickins unsigned long addr; 2168c01d5b30SHugh Dickins unsigned long offset; 2169c01d5b30SHugh Dickins unsigned long inflated_len; 2170c01d5b30SHugh Dickins unsigned long inflated_addr; 2171c01d5b30SHugh Dickins unsigned long inflated_offset; 2172c01d5b30SHugh Dickins 2173c01d5b30SHugh Dickins if (len > TASK_SIZE) 2174c01d5b30SHugh Dickins return -ENOMEM; 2175c01d5b30SHugh Dickins 2176c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2177c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2178c01d5b30SHugh Dickins 2179396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2180c01d5b30SHugh Dickins return addr; 2181c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2182c01d5b30SHugh Dickins return addr; 2183c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2184c01d5b30SHugh Dickins return addr; 2185c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2186c01d5b30SHugh Dickins return addr; 2187c01d5b30SHugh Dickins 2188c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2189c01d5b30SHugh Dickins return addr; 2190c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2191c01d5b30SHugh Dickins return addr; 2192c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2193c01d5b30SHugh Dickins return addr; 2194c01d5b30SHugh Dickins /* 2195c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2196c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 219799158997SKirill A. Shutemov * But if caller specified an address hint and we allocated area there 219899158997SKirill A. Shutemov * successfully, respect that as before. 2199c01d5b30SHugh Dickins */ 220099158997SKirill A. Shutemov if (uaddr == addr) 2201c01d5b30SHugh Dickins return addr; 2202c01d5b30SHugh Dickins 2203c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2204c01d5b30SHugh Dickins struct super_block *sb; 2205c01d5b30SHugh Dickins 2206c01d5b30SHugh Dickins if (file) { 2207c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2208c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2209c01d5b30SHugh Dickins } else { 2210c01d5b30SHugh Dickins /* 2211c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2212c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
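 *
 * e.g. a shared anonymous mapping from userspace (an assumed typical
 * caller, not code from this tree):
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * arrives here with file == NULL and falls back to shm_mnt below.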
2213c01d5b30SHugh Dickins */ 2214c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2215c01d5b30SHugh Dickins return addr; 2216c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2217c01d5b30SHugh Dickins } 22183089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2219c01d5b30SHugh Dickins return addr; 2220c01d5b30SHugh Dickins } 2221c01d5b30SHugh Dickins 2222c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2223c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2224c01d5b30SHugh Dickins return addr; 2225c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2226c01d5b30SHugh Dickins return addr; 2227c01d5b30SHugh Dickins 2228c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2229c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2230c01d5b30SHugh Dickins return addr; 2231c01d5b30SHugh Dickins if (inflated_len < len) 2232c01d5b30SHugh Dickins return addr; 2233c01d5b30SHugh Dickins 223499158997SKirill A. Shutemov inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2235c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2236c01d5b30SHugh Dickins return addr; 2237c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2238c01d5b30SHugh Dickins return addr; 2239c01d5b30SHugh Dickins 2240c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2241c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2242c01d5b30SHugh Dickins if (inflated_offset > offset) 2243c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2244c01d5b30SHugh Dickins 2245c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2246c01d5b30SHugh Dickins return addr; 2247c01d5b30SHugh Dickins return inflated_addr; 2248c01d5b30SHugh Dickins } 2249c01d5b30SHugh Dickins 22501da177e4SLinus Torvalds #ifdef CONFIG_NUMA 225141ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 22521da177e4SLinus Torvalds { 2253496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 225441ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 22551da177e4SLinus Torvalds } 22561da177e4SLinus Torvalds 2257d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2258d8dc74f2SAdrian Bunk unsigned long addr) 22591da177e4SLinus Torvalds { 2260496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 226141ffe5d5SHugh Dickins pgoff_t index; 22621da177e4SLinus Torvalds 226341ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 226441ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 22651da177e4SLinus Torvalds } 22661da177e4SLinus Torvalds #endif 22671da177e4SLinus Torvalds 2268d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 22691da177e4SLinus Torvalds { 2270496ad9aaSAl Viro struct inode *inode = file_inode(file); 22711da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 22721da177e4SLinus Torvalds int retval = -ENOMEM; 22731da177e4SLinus Torvalds 2274ea0dfeb4SHugh Dickins /* 2275ea0dfeb4SHugh Dickins * What serializes the accesses to info->flags? 2276ea0dfeb4SHugh Dickins * ipc_lock_object() when called from shmctl_do_lock(), 2277ea0dfeb4SHugh Dickins * no serialization needed when called from shm_destroy(). 
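	 *
	 * Illustrative SysV path (editor's sketch, not part of this file)
	 * that arrives here via shmctl_do_lock():
	 *
	 *	id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
	 *	shmctl(id, SHM_LOCK, NULL);	-> lock = 1 below
	 *	shmctl(id, SHM_UNLOCK, NULL);	-> lock = 0 below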
2278ea0dfeb4SHugh Dickins */ 22791da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 2280d7c9e99aSAlexey Gladkov if (!user_shm_lock(inode->i_size, ucounts)) 22811da177e4SLinus Torvalds goto out_nomem; 22821da177e4SLinus Torvalds info->flags |= VM_LOCKED; 228389e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 22841da177e4SLinus Torvalds } 2285d7c9e99aSAlexey Gladkov if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2286d7c9e99aSAlexey Gladkov user_shm_unlock(inode->i_size, ucounts); 22871da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 228889e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 22891da177e4SLinus Torvalds } 22901da177e4SLinus Torvalds retval = 0; 229189e004eaSLee Schermerhorn 22921da177e4SLinus Torvalds out_nomem: 22931da177e4SLinus Torvalds return retval; 22941da177e4SLinus Torvalds } 22951da177e4SLinus Torvalds 22969b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 22971da177e4SLinus Torvalds { 2298d09e8ca6SPasha Tatashin struct inode *inode = file_inode(file); 2299d09e8ca6SPasha Tatashin struct shmem_inode_info *info = SHMEM_I(inode); 230022247efdSPeter Xu int ret; 2301ab3948f5SJoel Fernandes (Google) 230222247efdSPeter Xu ret = seal_check_future_write(info->seals, vma); 230322247efdSPeter Xu if (ret) 230422247efdSPeter Xu return ret; 2305ab3948f5SJoel Fernandes (Google) 230651b0bff2SCatalin Marinas /* arm64 - allow memory tagging on RAM-based files */ 230751b0bff2SCatalin Marinas vma->vm_flags |= VM_MTE_ALLOWED; 230851b0bff2SCatalin Marinas 23091da177e4SLinus Torvalds file_accessed(file); 2310d09e8ca6SPasha Tatashin /* This is anonymous shared memory if it is unlinked at the time of mmap */ 2311d09e8ca6SPasha Tatashin if (inode->i_nlink) 23121da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2313d09e8ca6SPasha Tatashin else 2314d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops; 23151da177e4SLinus Torvalds return 0; 23161da177e4SLinus Torvalds } 23171da177e4SLinus Torvalds 2318cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR 2319cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2320cb241339SHugh Dickins 2321cb241339SHugh Dickins /* 2322cb241339SHugh Dickins * chattr's fsflags are unrelated to extended attributes, 2323cb241339SHugh Dickins * but tmpfs has chosen to enable them under the same config option. 2324cb241339SHugh Dickins */ 2325cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2326e408e695STheodore Ts'o { 2327cb241339SHugh Dickins unsigned int i_flags = 0; 2328cb241339SHugh Dickins 2329cb241339SHugh Dickins if (fsflags & FS_NOATIME_FL) 2330cb241339SHugh Dickins i_flags |= S_NOATIME; 2331cb241339SHugh Dickins if (fsflags & FS_APPEND_FL) 2332cb241339SHugh Dickins i_flags |= S_APPEND; 2333cb241339SHugh Dickins if (fsflags & FS_IMMUTABLE_FL) 2334cb241339SHugh Dickins i_flags |= S_IMMUTABLE; 2335cb241339SHugh Dickins /* 2336cb241339SHugh Dickins * But FS_NODUMP_FL does not require any action in i_flags. 
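	 *
	 * Illustrative trigger (editor's sketch, not part of this file):
	 * chattr(1) on a tmpfs file boils down to
	 *
	 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
	 *	attr |= FS_APPEND_FL;		- becomes S_APPEND above
	 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
	 *
	 * routed through shmem_fileattr_set() further down this file.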
2337cb241339SHugh Dickins 	 */
2338cb241339SHugh Dickins 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2339e408e695STheodore Ts'o }
2340cb241339SHugh Dickins #else
2341cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2342cb241339SHugh Dickins {
2343cb241339SHugh Dickins }
2344cb241339SHugh Dickins #define shmem_initxattrs NULL
2345cb241339SHugh Dickins #endif
2346e408e695STheodore Ts'o 
2347e408e695STheodore Ts'o static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
234809208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
23491da177e4SLinus Torvalds {
23501da177e4SLinus Torvalds 	struct inode *inode;
23511da177e4SLinus Torvalds 	struct shmem_inode_info *info;
23521da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2353e809d5f0SChris Down 	ino_t ino;
23541da177e4SLinus Torvalds 
2355e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
23561da177e4SLinus Torvalds 		return NULL;
23571da177e4SLinus Torvalds 
23581da177e4SLinus Torvalds 	inode = new_inode(sb);
23591da177e4SLinus Torvalds 	if (inode) {
2360e809d5f0SChris Down 		inode->i_ino = ino;
236121cb47beSChristian Brauner 		inode_init_owner(&init_user_ns, inode, dir, mode);
23621da177e4SLinus Torvalds 		inode->i_blocks = 0;
2363078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2364a251c17aSJason A. Donenfeld 		inode->i_generation = get_random_u32();
23651da177e4SLinus Torvalds 		info = SHMEM_I(inode);
23661da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
23671da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2368af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
236940e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23700b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2371f7cd16a5SXavier Roche 		info->i_crtime = inode->i_mtime;
2372e408e695STheodore Ts'o 		info->fsflags = (dir == NULL) ? 0 :
2373e408e695STheodore Ts'o 			SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2374cb241339SHugh Dickins 		if (info->fsflags)
2375cb241339SHugh Dickins 			shmem_set_inode_flags(inode, info->fsflags);
2376779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23771da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
237838f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
237972c04902SAl Viro 		cache_no_acl(inode);
2380ff36da69SMatthew Wilcox (Oracle) 		mapping_set_large_folios(inode->i_mapping);
23811da177e4SLinus Torvalds 
23821da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23831da177e4SLinus Torvalds 		default:
238439f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23851da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23861da177e4SLinus Torvalds 			break;
23871da177e4SLinus Torvalds 		case S_IFREG:
238814fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23891da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23901da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
239171fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
239271fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23931da177e4SLinus Torvalds 			break;
23941da177e4SLinus Torvalds 		case S_IFDIR:
2395d8c76e6fSDave Hansen 			inc_nlink(inode);
23961da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23971da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23981da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23991da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
24001da177e4SLinus Torvalds 			break;
24011da177e4SLinus Torvalds 		case S_IFLNK:
24021da177e4SLinus Torvalds 			/*
24031da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
24041da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
24051da177e4SLinus Torvalds 			 */
240671fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
24071da177e4SLinus Torvalds 			break;
24081da177e4SLinus Torvalds 		}
2409b45d71fbSJoel Fernandes (Google) 
2410b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
24115b04c689SPavel Emelyanov 	} else
24125b04c689SPavel Emelyanov 		shmem_free_inode(sb);
24131da177e4SLinus Torvalds 	return inode;
24141da177e4SLinus Torvalds }
24151da177e4SLinus Torvalds 
24163460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
24173460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
24184c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
24194c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
24204c27fe4cSMike Rapoport 			   unsigned long dst_addr,
24214c27fe4cSMike Rapoport 			   unsigned long src_addr,
24228ee79edfSPeter Xu 			   bool zeropage, bool wp_copy,
24234c27fe4cSMike Rapoport 			   struct page **pagep)
24244c27fe4cSMike Rapoport {
24254c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
24264c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
24274c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
24284c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
24294c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
24304c27fe4cSMike Rapoport 	void *page_kaddr;
2431b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
24324c27fe4cSMike Rapoport 	int ret;
24333460f6e5SAxel Rasmussen 	pgoff_t max_off;
24344c27fe4cSMike Rapoport 
24357ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
24367ed9d238SAxel Rasmussen 		/*
24377ed9d238SAxel Rasmussen 		 * We may have got a page, returned -ENOENT triggering a retry,
24387ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the page, to
24397ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
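		 *
		 * (Editor's note: the -ENOENT leg below is the other half
		 * of that dance: the caller drops mmap_lock, finishes the
		 * copy_from_user() with faults enabled, then retries this
		 * function with *pagep still carrying the page.)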
24407ed9d238SAxel Rasmussen */ 24417ed9d238SAxel Rasmussen if (unlikely(*pagep)) { 24427ed9d238SAxel Rasmussen put_page(*pagep); 24437ed9d238SAxel Rasmussen *pagep = NULL; 24447ed9d238SAxel Rasmussen } 24457d64ae3aSAxel Rasmussen return -ENOMEM; 24467ed9d238SAxel Rasmussen } 24474c27fe4cSMike Rapoport 2448cb658a45SAndrea Arcangeli if (!*pagep) { 24497d64ae3aSAxel Rasmussen ret = -ENOMEM; 24507a7256d5SMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, pgoff); 24517a7256d5SMatthew Wilcox (Oracle) if (!folio) 24520f079694SMike Rapoport goto out_unacct_blocks; 24534c27fe4cSMike Rapoport 24543460f6e5SAxel Rasmussen if (!zeropage) { /* COPY */ 24557a7256d5SMatthew Wilcox (Oracle) page_kaddr = kmap_local_folio(folio, 0); 24565dc21f0cSIra Weiny /* 24575dc21f0cSIra Weiny * The read mmap_lock is held here. Despite the 24585dc21f0cSIra Weiny * mmap_lock being read recursive a deadlock is still 24595dc21f0cSIra Weiny * possible if a writer has taken a lock. For example: 24605dc21f0cSIra Weiny * 24615dc21f0cSIra Weiny * process A thread 1 takes read lock on own mmap_lock 24625dc21f0cSIra Weiny * process A thread 2 calls mmap, blocks taking write lock 24635dc21f0cSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock 24645dc21f0cSIra Weiny * process B thread 2 calls mmap, blocks taking write lock 24655dc21f0cSIra Weiny * process A thread 1 blocks taking read lock on process B 24665dc21f0cSIra Weiny * process B thread 1 blocks taking read lock on process A 24675dc21f0cSIra Weiny * 24685dc21f0cSIra Weiny * Disable page faults to prevent potential deadlock 24695dc21f0cSIra Weiny * and retry the copy outside the mmap_lock. 24705dc21f0cSIra Weiny */ 24715dc21f0cSIra Weiny pagefault_disable(); 24728d103963SMike Rapoport ret = copy_from_user(page_kaddr, 24738d103963SMike Rapoport (const void __user *)src_addr, 24744c27fe4cSMike Rapoport PAGE_SIZE); 24755dc21f0cSIra Weiny pagefault_enable(); 24767a7256d5SMatthew Wilcox (Oracle) kunmap_local(page_kaddr); 24774c27fe4cSMike Rapoport 2478c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */ 24794c27fe4cSMike Rapoport if (unlikely(ret)) { 24807a7256d5SMatthew Wilcox (Oracle) *pagep = &folio->page; 24817d64ae3aSAxel Rasmussen ret = -ENOENT; 24824c27fe4cSMike Rapoport /* don't free the page */ 24837d64ae3aSAxel Rasmussen goto out_unacct_blocks; 24844c27fe4cSMike Rapoport } 248519b482c2SMuchun Song 24867a7256d5SMatthew Wilcox (Oracle) flush_dcache_folio(folio); 24873460f6e5SAxel Rasmussen } else { /* ZEROPAGE */ 24887a7256d5SMatthew Wilcox (Oracle) clear_user_highpage(&folio->page, dst_addr); 24898d103963SMike Rapoport } 24904c27fe4cSMike Rapoport } else { 24917a7256d5SMatthew Wilcox (Oracle) folio = page_folio(*pagep); 24927a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 24934c27fe4cSMike Rapoport *pagep = NULL; 24944c27fe4cSMike Rapoport } 24954c27fe4cSMike Rapoport 24967a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_locked(folio)); 24977a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_swapbacked(folio)); 24987a7256d5SMatthew Wilcox (Oracle) __folio_set_locked(folio); 24997a7256d5SMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 25007a7256d5SMatthew Wilcox (Oracle) __folio_mark_uptodate(folio); 25019cc90c66SAndrea Arcangeli 2502e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2503e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 25043460f6e5SAxel Rasmussen if (unlikely(pgoff >= max_off)) 2505e2a50c1fSAndrea Arcangeli goto out_release; 
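	/*
	 * Illustrative userspace driver of this path (editor's sketch,
	 * not part of this file; assumptions: uffd already registered
	 * over a MAP_SHARED shmem mapping, src_page a filled page-sized
	 * buffer):
	 *
	 *	struct uffdio_copy uc = {
	 *		.dst = fault_addr & ~(page_size - 1),
	 *		.src = (unsigned long)src_page,
	 *		.len = page_size,
	 *		.mode = 0,
	 *	};
	 *	ioctl(uffd, UFFDIO_COPY, &uc);
	 */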
2506e2a50c1fSAndrea Arcangeli 2507b7dd44a1SMatthew Wilcox (Oracle) ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, 25083fea5a49SJohannes Weiner gfp & GFP_RECLAIM_MASK, dst_mm); 25094c27fe4cSMike Rapoport if (ret) 25104c27fe4cSMike Rapoport goto out_release; 25114c27fe4cSMike Rapoport 25127d64ae3aSAxel Rasmussen ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, 25137a7256d5SMatthew Wilcox (Oracle) &folio->page, true, wp_copy); 25147d64ae3aSAxel Rasmussen if (ret) 25157d64ae3aSAxel Rasmussen goto out_delete_from_cache; 25164c27fe4cSMike Rapoport 251794b7cc01SYang Shi spin_lock_irq(&info->lock); 25184c27fe4cSMike Rapoport info->alloced++; 25194c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 25204c27fe4cSMike Rapoport shmem_recalc_inode(inode); 252194b7cc01SYang Shi spin_unlock_irq(&info->lock); 25224c27fe4cSMike Rapoport 25237a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 25247d64ae3aSAxel Rasmussen return 0; 25257d64ae3aSAxel Rasmussen out_delete_from_cache: 25267a7256d5SMatthew Wilcox (Oracle) filemap_remove_folio(folio); 25274c27fe4cSMike Rapoport out_release: 25287a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 25297a7256d5SMatthew Wilcox (Oracle) folio_put(folio); 25304c27fe4cSMike Rapoport out_unacct_blocks: 25310f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 25327d64ae3aSAxel Rasmussen return ret; 25334c27fe4cSMike Rapoport } 25343460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */ 25358d103963SMike Rapoport 25361da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 253792e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 253869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 25391da177e4SLinus Torvalds 25401da177e4SLinus Torvalds static int 2541800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 25429d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 2543800d15a5SNick Piggin struct page **pagep, void **fsdata) 25441da177e4SLinus Torvalds { 2545800d15a5SNick Piggin struct inode *inode = mapping->host; 254640e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 254709cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2548eff1f906SMatthew Wilcox (Oracle) struct folio *folio; 2549a7605426SYang Shi int ret = 0; 255040e041a2SDavid Herrmann 25519608703eSJan Kara /* i_rwsem is held by caller */ 2552ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW | 2553ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2554ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 255540e041a2SDavid Herrmann return -EPERM; 255640e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 255740e041a2SDavid Herrmann return -EPERM; 255840e041a2SDavid Herrmann } 255940e041a2SDavid Herrmann 2560eff1f906SMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); 2561a7605426SYang Shi 2562a7605426SYang Shi if (ret) 2563a7605426SYang Shi return ret; 2564a7605426SYang Shi 2565eff1f906SMatthew Wilcox (Oracle) *pagep = folio_file_page(folio, index); 2566a7605426SYang Shi if (PageHWPoison(*pagep)) { 2567eff1f906SMatthew Wilcox (Oracle) folio_unlock(folio); 2568eff1f906SMatthew Wilcox (Oracle) folio_put(folio); 2569a7605426SYang Shi *pagep = NULL; 2570a7605426SYang Shi return -EIO; 2571a7605426SYang Shi } 2572a7605426SYang Shi 2573a7605426SYang Shi return 0; 2574800d15a5SNick Piggin } 2575800d15a5SNick Piggin 2576800d15a5SNick Piggin static int 2577800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2578800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2579800d15a5SNick Piggin struct page *page, void *fsdata) 2580800d15a5SNick Piggin { 2581800d15a5SNick Piggin struct inode *inode = mapping->host; 2582800d15a5SNick Piggin 2583800d15a5SNick Piggin if (pos + copied > inode->i_size) 2584800d15a5SNick Piggin i_size_write(inode, pos + copied); 2585800d15a5SNick Piggin 2586ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2587800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2588800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2589800d8c63SKirill A. Shutemov int i; 2590800d8c63SKirill A. Shutemov 2591800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2592800d8c63SKirill A. Shutemov if (head + i == page) 2593800d8c63SKirill A. Shutemov continue; 2594800d8c63SKirill A. Shutemov clear_highpage(head + i); 2595800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2596800d8c63SKirill A. Shutemov } 2597800d8c63SKirill A. Shutemov } 259809cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 259909cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2600ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 260109cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2602ec9516fbSHugh Dickins } 2603800d8c63SKirill A. Shutemov SetPageUptodate(head); 2604ec9516fbSHugh Dickins } 2605d3602444SHugh Dickins set_page_dirty(page); 26066746aff7SWu Fengguang unlock_page(page); 260709cbfeafSKirill A. 
Shutemov put_page(page); 2608d3602444SHugh Dickins 2609800d15a5SNick Piggin return copied; 26101da177e4SLinus Torvalds } 26111da177e4SLinus Torvalds 26122ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 26131da177e4SLinus Torvalds { 26146e58e79dSAl Viro struct file *file = iocb->ki_filp; 26156e58e79dSAl Viro struct inode *inode = file_inode(file); 26161da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 261741ffe5d5SHugh Dickins pgoff_t index; 261841ffe5d5SHugh Dickins unsigned long offset; 2619f7c1d074SGeert Uytterhoeven int error = 0; 2620cb66a7a1SAl Viro ssize_t retval = 0; 26216e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2622a0ee5ec5SHugh Dickins 262309cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 262409cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 26251da177e4SLinus Torvalds 26261da177e4SLinus Torvalds for (;;) { 26274601e2fcSMatthew Wilcox (Oracle) struct folio *folio = NULL; 26281da177e4SLinus Torvalds struct page *page = NULL; 262941ffe5d5SHugh Dickins pgoff_t end_index; 263041ffe5d5SHugh Dickins unsigned long nr, ret; 26311da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 26321da177e4SLinus Torvalds 263309cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 26341da177e4SLinus Torvalds if (index > end_index) 26351da177e4SLinus Torvalds break; 26361da177e4SLinus Torvalds if (index == end_index) { 263709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26381da177e4SLinus Torvalds if (nr <= offset) 26391da177e4SLinus Torvalds break; 26401da177e4SLinus Torvalds } 26411da177e4SLinus Torvalds 26424601e2fcSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, SGP_READ); 26436e58e79dSAl Viro if (error) { 26446e58e79dSAl Viro if (error == -EINVAL) 26456e58e79dSAl Viro error = 0; 26461da177e4SLinus Torvalds break; 26471da177e4SLinus Torvalds } 26484601e2fcSMatthew Wilcox (Oracle) if (folio) { 26494601e2fcSMatthew Wilcox (Oracle) folio_unlock(folio); 2650a7605426SYang Shi 26514601e2fcSMatthew Wilcox (Oracle) page = folio_file_page(folio, index); 2652a7605426SYang Shi if (PageHWPoison(page)) { 26534601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 2654a7605426SYang Shi error = -EIO; 2655a7605426SYang Shi break; 2656a7605426SYang Shi } 265775edd345SHugh Dickins } 26581da177e4SLinus Torvalds 26591da177e4SLinus Torvalds /* 26601da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 26619608703eSJan Kara * are called without i_rwsem protection against truncate 26621da177e4SLinus Torvalds */ 266309cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 26641da177e4SLinus Torvalds i_size = i_size_read(inode); 266509cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 26661da177e4SLinus Torvalds if (index == end_index) { 266709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26681da177e4SLinus Torvalds if (nr <= offset) { 26694601e2fcSMatthew Wilcox (Oracle) if (folio) 26704601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 26711da177e4SLinus Torvalds break; 26721da177e4SLinus Torvalds } 26731da177e4SLinus Torvalds } 26741da177e4SLinus Torvalds nr -= offset; 26751da177e4SLinus Torvalds 26764601e2fcSMatthew Wilcox (Oracle) if (folio) { 26771da177e4SLinus Torvalds /* 26781da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 26791da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 26801da177e4SLinus Torvalds * before reading the page on the kernel side. 
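		 *
		 * (Editor's note: that is why nr and end_index are
		 * recomputed from a fresh i_size_read() here; a racing
		 * ftruncate() then typically surfaces as a short read
		 * rather than as a copy from beyond the new EOF.)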
26811da177e4SLinus Torvalds */ 26821da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 26831da177e4SLinus Torvalds flush_dcache_page(page); 26841da177e4SLinus Torvalds /* 26851da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 26861da177e4SLinus Torvalds */ 26871da177e4SLinus Torvalds if (!offset) 26884601e2fcSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 26891da177e4SLinus Torvalds /* 26901da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 26911da177e4SLinus Torvalds * now we can copy it to user space... 26921da177e4SLinus Torvalds */ 26932ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 26944601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 26951bdec44bSHugh Dickins 2696fcb14cb1SAl Viro } else if (user_backed_iter(to)) { 26971bdec44bSHugh Dickins /* 26981bdec44bSHugh Dickins * Copy to user tends to be so well optimized, but 26991bdec44bSHugh Dickins * clear_user() not so much, that it is noticeably 27001bdec44bSHugh Dickins * faster to copy the zero page instead of clearing. 27011bdec44bSHugh Dickins */ 27021bdec44bSHugh Dickins ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); 27031bdec44bSHugh Dickins } else { 27041bdec44bSHugh Dickins /* 27051bdec44bSHugh Dickins * But submitting the same page twice in a row to 27061bdec44bSHugh Dickins * splice() - or others? - can result in confusion: 27071bdec44bSHugh Dickins * so don't attempt that optimization on pipes etc. 27081bdec44bSHugh Dickins */ 27091bdec44bSHugh Dickins ret = iov_iter_zero(nr, to); 27101bdec44bSHugh Dickins } 27111bdec44bSHugh Dickins 27126e58e79dSAl Viro retval += ret; 27131da177e4SLinus Torvalds offset += ret; 271409cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 271509cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 27161da177e4SLinus Torvalds 27172ba5bbedSAl Viro if (!iov_iter_count(to)) 27181da177e4SLinus Torvalds break; 27196e58e79dSAl Viro if (ret < nr) { 27206e58e79dSAl Viro error = -EFAULT; 27216e58e79dSAl Viro break; 27226e58e79dSAl Viro } 27231da177e4SLinus Torvalds cond_resched(); 27241da177e4SLinus Torvalds } 27251da177e4SLinus Torvalds 272609cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 27276e58e79dSAl Viro file_accessed(file); 27286e58e79dSAl Viro return retval ? 
retval : error; 27291da177e4SLinus Torvalds } 27301da177e4SLinus Torvalds 2731965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2732220f2ac9SHugh Dickins { 2733220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2734220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2735220f2ac9SHugh Dickins 2736965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2737965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2738220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 273941139aa4SMatthew Wilcox (Oracle) if (offset < 0) 274041139aa4SMatthew Wilcox (Oracle) return -ENXIO; 274141139aa4SMatthew Wilcox (Oracle) 27425955102cSAl Viro inode_lock(inode); 27439608703eSJan Kara /* We're holding i_rwsem so we can access i_size directly */ 274441139aa4SMatthew Wilcox (Oracle) offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 2745387aae6fSHugh Dickins if (offset >= 0) 274646a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 27475955102cSAl Viro inode_unlock(inode); 2748220f2ac9SHugh Dickins return offset; 2749220f2ac9SHugh Dickins } 2750220f2ac9SHugh Dickins 275183e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 275283e4fa9cSHugh Dickins loff_t len) 275383e4fa9cSHugh Dickins { 2754496ad9aaSAl Viro struct inode *inode = file_inode(file); 2755e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 275640e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 27571aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2758d144bf62SHugh Dickins pgoff_t start, index, end, undo_fallocend; 2759e2d12e22SHugh Dickins int error; 276083e4fa9cSHugh Dickins 276113ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 276213ace4d0SHugh Dickins return -EOPNOTSUPP; 276313ace4d0SHugh Dickins 27645955102cSAl Viro inode_lock(inode); 276583e4fa9cSHugh Dickins 276683e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 276783e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 276883e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 276983e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 27708e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 277183e4fa9cSHugh Dickins 27729608703eSJan Kara /* protected by i_rwsem */ 2773ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 277440e041a2SDavid Herrmann error = -EPERM; 277540e041a2SDavid Herrmann goto out; 277640e041a2SDavid Herrmann } 277740e041a2SDavid Herrmann 27788e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2779aa71ecd8SChen Jun shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 2780f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2781f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2782f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2783f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2784f00cdc6dSHugh Dickins 278583e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 278683e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 278783e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 278883e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 278983e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 27908e205f77SHugh Dickins 27918e205f77SHugh Dickins 
spin_lock(&inode->i_lock); 27928e205f77SHugh Dickins inode->i_private = NULL; 27938e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 27942055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 27958e205f77SHugh Dickins spin_unlock(&inode->i_lock); 279683e4fa9cSHugh Dickins error = 0; 27978e205f77SHugh Dickins goto out; 279883e4fa9cSHugh Dickins } 279983e4fa9cSHugh Dickins 2800e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2801e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2802e2d12e22SHugh Dickins if (error) 2803e2d12e22SHugh Dickins goto out; 2804e2d12e22SHugh Dickins 280540e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 280640e041a2SDavid Herrmann error = -EPERM; 280740e041a2SDavid Herrmann goto out; 280840e041a2SDavid Herrmann } 280940e041a2SDavid Herrmann 281009cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 281109cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2812e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2813e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2814e2d12e22SHugh Dickins error = -ENOSPC; 2815e2d12e22SHugh Dickins goto out; 2816e2d12e22SHugh Dickins } 2817e2d12e22SHugh Dickins 28188e205f77SHugh Dickins shmem_falloc.waitq = NULL; 28191aac1400SHugh Dickins shmem_falloc.start = start; 28201aac1400SHugh Dickins shmem_falloc.next = start; 28211aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 28221aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 28231aac1400SHugh Dickins spin_lock(&inode->i_lock); 28241aac1400SHugh Dickins inode->i_private = &shmem_falloc; 28251aac1400SHugh Dickins spin_unlock(&inode->i_lock); 28261aac1400SHugh Dickins 2827d144bf62SHugh Dickins /* 2828d144bf62SHugh Dickins * info->fallocend is only relevant when huge pages might be 2829d144bf62SHugh Dickins * involved: to prevent split_huge_page() freeing fallocated 2830d144bf62SHugh Dickins * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. 2831d144bf62SHugh Dickins */ 2832d144bf62SHugh Dickins undo_fallocend = info->fallocend; 2833d144bf62SHugh Dickins if (info->fallocend < end) 2834d144bf62SHugh Dickins info->fallocend = end; 2835d144bf62SHugh Dickins 2836050dcb5cSHugh Dickins for (index = start; index < end; ) { 2837b0802b22SMatthew Wilcox (Oracle) struct folio *folio; 2838e2d12e22SHugh Dickins 2839e2d12e22SHugh Dickins /* 2840e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2841e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2842e2d12e22SHugh Dickins */ 2843e2d12e22SHugh Dickins if (signal_pending(current)) 2844e2d12e22SHugh Dickins error = -EINTR; 28451aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 28461aac1400SHugh Dickins error = -ENOMEM; 2847e2d12e22SHugh Dickins else 2848b0802b22SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, 2849b0802b22SMatthew Wilcox (Oracle) SGP_FALLOC); 2850e2d12e22SHugh Dickins if (error) { 2851d144bf62SHugh Dickins info->fallocend = undo_fallocend; 2852b0802b22SMatthew Wilcox (Oracle) /* Remove the !uptodate folios we added */ 28537f556567SHugh Dickins if (index > start) { 28541635f6a7SHugh Dickins shmem_undo_range(inode, 285509cbfeafSKirill A. 
Shutemov (loff_t)start << PAGE_SHIFT, 2856b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 28577f556567SHugh Dickins } 28581aac1400SHugh Dickins goto undone; 2859e2d12e22SHugh Dickins } 2860e2d12e22SHugh Dickins 2861050dcb5cSHugh Dickins /* 2862050dcb5cSHugh Dickins * Here is a more important optimization than it appears: 2863b0802b22SMatthew Wilcox (Oracle) * a second SGP_FALLOC on the same large folio will clear it, 2864b0802b22SMatthew Wilcox (Oracle) * making it uptodate and un-undoable if we fail later. 2865050dcb5cSHugh Dickins */ 2866b0802b22SMatthew Wilcox (Oracle) index = folio_next_index(folio); 2867050dcb5cSHugh Dickins /* Beware 32-bit wraparound */ 2868050dcb5cSHugh Dickins if (!index) 2869050dcb5cSHugh Dickins index--; 2870050dcb5cSHugh Dickins 2871e2d12e22SHugh Dickins /* 28721aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 28731aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 28741aac1400SHugh Dickins */ 2875b0802b22SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) 2876050dcb5cSHugh Dickins shmem_falloc.nr_falloced += index - shmem_falloc.next; 2877050dcb5cSHugh Dickins shmem_falloc.next = index; 28781aac1400SHugh Dickins 28791aac1400SHugh Dickins /* 2880b0802b22SMatthew Wilcox (Oracle) * If !uptodate, leave it that way so that freeable folios 28811635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 2882b0802b22SMatthew Wilcox (Oracle) * But mark it dirty so that memory pressure will swap rather 2883b0802b22SMatthew Wilcox (Oracle) * than free the folios we are allocating (and SGP_CACHE folios 2884e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2885e2d12e22SHugh Dickins */ 2886b0802b22SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2887b0802b22SMatthew Wilcox (Oracle) folio_unlock(folio); 2888b0802b22SMatthew Wilcox (Oracle) folio_put(folio); 2889e2d12e22SHugh Dickins cond_resched(); 2890e2d12e22SHugh Dickins } 2891e2d12e22SHugh Dickins 2892e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2893e2d12e22SHugh Dickins i_size_write(inode, offset + len); 28941aac1400SHugh Dickins undone: 28951aac1400SHugh Dickins spin_lock(&inode->i_lock); 28961aac1400SHugh Dickins inode->i_private = NULL; 28971aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2898e2d12e22SHugh Dickins out: 289915f242bbSHugh Dickins if (!error) 290015f242bbSHugh Dickins file_modified(file); 29015955102cSAl Viro inode_unlock(inode); 290283e4fa9cSHugh Dickins return error; 290383e4fa9cSHugh Dickins } 290483e4fa9cSHugh Dickins 2905726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 29061da177e4SLinus Torvalds { 2907726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 29081da177e4SLinus Torvalds 29091da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 291009cbfeafSKirill A. 
Shutemov buf->f_bsize = PAGE_SIZE; 29111da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 29120edd73b3SHugh Dickins if (sbinfo->max_blocks) { 29131da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 291441ffe5d5SHugh Dickins buf->f_bavail = 291541ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 291641ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 29170edd73b3SHugh Dickins } 29180edd73b3SHugh Dickins if (sbinfo->max_inodes) { 29191da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 29201da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 29211da177e4SLinus Torvalds } 29221da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 292359cda49eSAmir Goldstein 292459cda49eSAmir Goldstein buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 292559cda49eSAmir Goldstein 29261da177e4SLinus Torvalds return 0; 29271da177e4SLinus Torvalds } 29281da177e4SLinus Torvalds 29291da177e4SLinus Torvalds /* 29301da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 29311da177e4SLinus Torvalds */ 29321da177e4SLinus Torvalds static int 2933549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir, 2934549c7297SChristian Brauner struct dentry *dentry, umode_t mode, dev_t dev) 29351da177e4SLinus Torvalds { 29360b0a0806SHugh Dickins struct inode *inode; 29371da177e4SLinus Torvalds int error = -ENOSPC; 29381da177e4SLinus Torvalds 2939454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 29401da177e4SLinus Torvalds if (inode) { 2941feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2942feda821eSChristoph Hellwig if (error) 2943feda821eSChristoph Hellwig goto out_iput; 29442a7dba39SEric Paris error = security_inode_init_security(inode, dir, 29459d8f13baSMimi Zohar &dentry->d_name, 29466d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2947feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2948feda821eSChristoph Hellwig goto out_iput; 294937ec43cdSMimi Zohar 2950718deb6bSAl Viro error = 0; 29511da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2952078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 295336f05cabSJeff Layton inode_inc_iversion(dir); 29541da177e4SLinus Torvalds d_instantiate(dentry, inode); 29551da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 29561da177e4SLinus Torvalds } 29571da177e4SLinus Torvalds return error; 2958feda821eSChristoph Hellwig out_iput: 2959feda821eSChristoph Hellwig iput(inode); 2960feda821eSChristoph Hellwig return error; 29611da177e4SLinus Torvalds } 29621da177e4SLinus Torvalds 296360545d0dSAl Viro static int 2964549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 2965863f144fSMiklos Szeredi struct file *file, umode_t mode) 296660545d0dSAl Viro { 296760545d0dSAl Viro struct inode *inode; 296860545d0dSAl Viro int error = -ENOSPC; 296960545d0dSAl Viro 297060545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 297160545d0dSAl Viro if (inode) { 297260545d0dSAl Viro error = security_inode_init_security(inode, dir, 297360545d0dSAl Viro NULL, 297460545d0dSAl Viro shmem_initxattrs, NULL); 2975feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2976feda821eSChristoph Hellwig goto out_iput; 2977feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2978feda821eSChristoph Hellwig if (error) 2979feda821eSChristoph Hellwig goto out_iput; 2980863f144fSMiklos Szeredi d_tmpfile(file, 
			inode);
298160545d0dSAl Viro 	}
2982863f144fSMiklos Szeredi 	return finish_open_simple(file, error);
2983feda821eSChristoph Hellwig out_iput:
2984feda821eSChristoph Hellwig 	iput(inode);
2985feda821eSChristoph Hellwig 	return error;
298660545d0dSAl Viro }
298760545d0dSAl Viro 
2988549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2989549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
29901da177e4SLinus Torvalds {
29911da177e4SLinus Torvalds 	int error;
29921da177e4SLinus Torvalds 
2993549c7297SChristian Brauner 	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2994549c7297SChristian Brauner 				 mode | S_IFDIR, 0)))
29951da177e4SLinus Torvalds 		return error;
2996d8c76e6fSDave Hansen 	inc_nlink(dir);
29971da177e4SLinus Torvalds 	return 0;
29981da177e4SLinus Torvalds }
29991da177e4SLinus Torvalds 
3000549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
3001549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
30021da177e4SLinus Torvalds {
3003549c7297SChristian Brauner 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
30041da177e4SLinus Torvalds }
30051da177e4SLinus Torvalds 
30061da177e4SLinus Torvalds /*
30071da177e4SLinus Torvalds  * Link a file..
30081da177e4SLinus Torvalds  */
30091da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
30101da177e4SLinus Torvalds {
301175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
301229b00e60SDarrick J. Wong 	int ret = 0;
30131da177e4SLinus Torvalds 
30141da177e4SLinus Torvalds 	/*
30151da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
30161da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
30171da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
30181062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
30191062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
30201da177e4SLinus Torvalds 	 */
30211062af92SDarrick J. Wong 	if (inode->i_nlink) {
3022e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
30235b04c689SPavel Emelyanov 		if (ret)
30245b04c689SPavel Emelyanov 			goto out;
30251062af92SDarrick J. Wong 	}
30261da177e4SLinus Torvalds 
30271da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3028078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
302936f05cabSJeff Layton 	inode_inc_iversion(dir);
3030d8c76e6fSDave Hansen 	inc_nlink(inode);
30317de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
30321da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
30331da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30345b04c689SPavel Emelyanov out:
30355b04c689SPavel Emelyanov 	return ret;
30361da177e4SLinus Torvalds }
30371da177e4SLinus Torvalds 
30381da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
30391da177e4SLinus Torvalds {
304075c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
30411da177e4SLinus Torvalds 
30425b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
30435b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
30441da177e4SLinus Torvalds 
30451da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
3046078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
304736f05cabSJeff Layton 	inode_inc_iversion(dir);
30489a53c3a7SDave Hansen 	drop_nlink(inode);
30491da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
30501da177e4SLinus Torvalds 	return 0;
30511da177e4SLinus Torvalds }
30521da177e4SLinus Torvalds 
30531da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
30541da177e4SLinus Torvalds {
30551da177e4SLinus Torvalds 	if (!simple_empty(dentry))
30561da177e4SLinus Torvalds 		return -ENOTEMPTY;
30571da177e4SLinus Torvalds 
305875c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
30599a53c3a7SDave Hansen 	drop_nlink(dir);
30601da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
30611da177e4SLinus Torvalds }
30621da177e4SLinus Torvalds 
3063549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns,
3064549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
306546fdb794SMiklos Szeredi {
306646fdb794SMiklos Szeredi 	struct dentry *whiteout;
306646fdb794SMiklos Szeredi 	int error;
306846fdb794SMiklos Szeredi 
306946fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
307046fdb794SMiklos Szeredi 	if (!whiteout)
307146fdb794SMiklos Szeredi 		return -ENOMEM;
307246fdb794SMiklos Szeredi 
3073549c7297SChristian Brauner 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
307446fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
307546fdb794SMiklos Szeredi 	dput(whiteout);
307646fdb794SMiklos Szeredi 	if (error)
307746fdb794SMiklos Szeredi 		return error;
307846fdb794SMiklos Szeredi 
307946fdb794SMiklos Szeredi 	/*
308046fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
308146fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
308246fdb794SMiklos Szeredi 	 *
308346fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
308446fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
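	 *
	 * Illustrative trigger (editor's sketch, assuming a tmpfs dir fd
	 * and CAP_MKNOD): a whiteout rename can be requested directly with
	 *
	 *	renameat2(dfd, "old", dfd, "new", RENAME_WHITEOUT);
	 *
	 * which reaches shmem_rename2() below with RENAME_WHITEOUT set.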
308546fdb794SMiklos Szeredi 	 */
308646fdb794SMiklos Szeredi 	d_rehash(whiteout);
308746fdb794SMiklos Szeredi 	return 0;
308846fdb794SMiklos Szeredi }
308946fdb794SMiklos Szeredi 
30901da177e4SLinus Torvalds /*
30911da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
30921da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30931da177e4SLinus Torvalds  * it exists so that the VFS layer correctly frees it when it
30941da177e4SLinus Torvalds  * gets overwritten.
30951da177e4SLinus Torvalds  */
3096549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns,
3097549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
3098549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
3099549c7297SChristian Brauner 			 unsigned int flags)
31001da177e4SLinus Torvalds {
310175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
31021da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
31031da177e4SLinus Torvalds 
310446fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
31053b69ff51SMiklos Szeredi 		return -EINVAL;
31063b69ff51SMiklos Szeredi 
310737456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
31086429e463SLorenz Bauer 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
310937456771SMiklos Szeredi 
31101da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
31111da177e4SLinus Torvalds 		return -ENOTEMPTY;
31121da177e4SLinus Torvalds 
311346fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
311446fdb794SMiklos Szeredi 		int error;
311546fdb794SMiklos Szeredi 
3116549c7297SChristian Brauner 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
311746fdb794SMiklos Szeredi 		if (error)
311846fdb794SMiklos Szeredi 			return error;
311946fdb794SMiklos Szeredi 	}
312046fdb794SMiklos Szeredi 
312175c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
31221da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3123b928095bSMiklos Szeredi 		if (they_are_dirs) {
312475c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
31259a53c3a7SDave Hansen 			drop_nlink(old_dir);
3126b928095bSMiklos Szeredi 		}
31271da177e4SLinus Torvalds 	} else if (they_are_dirs) {
31289a53c3a7SDave Hansen 		drop_nlink(old_dir);
3129d8c76e6fSDave Hansen 		inc_nlink(new_dir);
31301da177e4SLinus Torvalds 	}
31311da177e4SLinus Torvalds 
31321da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
31331da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
31341da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
31351da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3136078cd827SDeepa Dinamani 		inode->i_ctime = current_time(old_dir);
313736f05cabSJeff Layton 	inode_inc_iversion(old_dir);
313836f05cabSJeff Layton 	inode_inc_iversion(new_dir);
31391da177e4SLinus Torvalds 	return 0;
31401da177e4SLinus Torvalds }
31411da177e4SLinus Torvalds 
3142549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3143549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
31441da177e4SLinus Torvalds {
31451da177e4SLinus Torvalds 	int error;
31461da177e4SLinus Torvalds 	int len;
31471da177e4SLinus Torvalds 	struct inode *inode;
31487ad0414bSMatthew Wilcox (Oracle) 	struct folio *folio;
31491da177e4SLinus Torvalds 
31501da177e4SLinus Torvalds 	len = strlen(symname) + 1;
315109cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
31521da177e4SLinus Torvalds 		return -ENAMETOOLONG;
31531da177e4SLinus Torvalds 
31540825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
31550825a6f9SJoe Perches 				VM_NORESERVE);
31561da177e4SLinus Torvalds 	if (!inode)
31571da177e4SLinus Torvalds 		return -ENOSPC;
31581da177e4SLinus Torvalds 
31599d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
31606d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3161343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3162570bc1c2SStephen Smalley 		iput(inode);
3163570bc1c2SStephen Smalley 		return error;
3164570bc1c2SStephen Smalley 	}
3165570bc1c2SStephen Smalley 
31661da177e4SLinus Torvalds 	inode->i_size = len-1;
316769f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
31683ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
31693ed47db3SAl Viro 		if (!inode->i_link) {
317069f07ec9SHugh Dickins 			iput(inode);
317169f07ec9SHugh Dickins 			return -ENOMEM;
317269f07ec9SHugh Dickins 		}
317369f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
31741da177e4SLinus Torvalds 	} else {
3175e8ecde25SAl Viro 		inode_nohighmem(inode);
31767ad0414bSMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
31771da177e4SLinus Torvalds 		if (error) {
31781da177e4SLinus Torvalds 			iput(inode);
31791da177e4SLinus Torvalds 			return error;
31801da177e4SLinus Torvalds 		}
318114fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
31821da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
31837ad0414bSMatthew Wilcox (Oracle) 		memcpy(folio_address(folio), symname, len);
31847ad0414bSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
31857ad0414bSMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
31867ad0414bSMatthew Wilcox (Oracle) 		folio_unlock(folio);
31877ad0414bSMatthew Wilcox (Oracle) 		folio_put(folio);
31881da177e4SLinus Torvalds 	}
31891da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3190078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
319136f05cabSJeff Layton 	inode_inc_iversion(dir);
31921da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31931da177e4SLinus Torvalds 	dget(dentry);
31941da177e4SLinus Torvalds 	return 0;
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds 
3197fceef393SAl Viro static void shmem_put_link(void *arg)
3198fceef393SAl Viro {
3199e4b57722SMatthew Wilcox (Oracle) 	folio_mark_accessed(arg);
3200e4b57722SMatthew Wilcox (Oracle) 	folio_put(arg);
3201fceef393SAl Viro }
3202fceef393SAl Viro 
32036b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3204fceef393SAl Viro 				  struct inode *inode,
3205fceef393SAl Viro 				  struct delayed_call *done)
32061da177e4SLinus Torvalds {
3207e4b57722SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
32086b255391SAl Viro 	int error;
3209e4b57722SMatthew Wilcox (Oracle) 
32106a6c9904SAl Viro 	if (!dentry) {
3211e4b57722SMatthew Wilcox (Oracle) 		folio = filemap_get_folio(inode->i_mapping, 0);
3212e4b57722SMatthew Wilcox (Oracle) 		if (!folio)
32136b255391SAl Viro 			return ERR_PTR(-ECHILD);
32147459c149SMatthew Wilcox (Oracle) 		if (PageHWPoison(folio_page(folio, 0)) ||
3215e4b57722SMatthew Wilcox (Oracle) 		    !folio_test_uptodate(folio)) {
3216e4b57722SMatthew Wilcox (Oracle) 			folio_put(folio);
32176a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
32186a6c9904SAl Viro 		}
32196a6c9904SAl Viro 	} else {
3220e4b57722SMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3221680baacbSAl Viro 		if (error)
3222680baacbSAl Viro 			return
ERR_PTR(error); 3223e4b57722SMatthew Wilcox (Oracle) if (!folio) 3224a7605426SYang Shi return ERR_PTR(-ECHILD); 32257459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0))) { 3226e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 3227e4b57722SMatthew Wilcox (Oracle) folio_put(folio); 3228a7605426SYang Shi return ERR_PTR(-ECHILD); 3229a7605426SYang Shi } 3230e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 32311da177e4SLinus Torvalds } 3232e4b57722SMatthew Wilcox (Oracle) set_delayed_call(done, shmem_put_link, folio); 3233e4b57722SMatthew Wilcox (Oracle) return folio_address(folio); 32341da177e4SLinus Torvalds } 32351da177e4SLinus Torvalds 3236b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3237e408e695STheodore Ts'o 3238e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa) 3239e408e695STheodore Ts'o { 3240e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3241e408e695STheodore Ts'o 3242e408e695STheodore Ts'o fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); 3243e408e695STheodore Ts'o 3244e408e695STheodore Ts'o return 0; 3245e408e695STheodore Ts'o } 3246e408e695STheodore Ts'o 3247e408e695STheodore Ts'o static int shmem_fileattr_set(struct user_namespace *mnt_userns, 3248e408e695STheodore Ts'o struct dentry *dentry, struct fileattr *fa) 3249e408e695STheodore Ts'o { 3250e408e695STheodore Ts'o struct inode *inode = d_inode(dentry); 3251e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(inode); 3252e408e695STheodore Ts'o 3253e408e695STheodore Ts'o if (fileattr_has_fsx(fa)) 3254e408e695STheodore Ts'o return -EOPNOTSUPP; 3255cb241339SHugh Dickins if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) 3256cb241339SHugh Dickins return -EOPNOTSUPP; 3257e408e695STheodore Ts'o 3258e408e695STheodore Ts'o info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | 3259e408e695STheodore Ts'o (fa->flags & SHMEM_FL_USER_MODIFIABLE); 3260e408e695STheodore Ts'o 3261cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags); 3262e408e695STheodore Ts'o inode->i_ctime = current_time(inode); 326336f05cabSJeff Layton inode_inc_iversion(inode); 3264e408e695STheodore Ts'o return 0; 3265e408e695STheodore Ts'o } 3266e408e695STheodore Ts'o 3267b09e0fa4SEric Paris /* 3268b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3269b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3270b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3271b09e0fa4SEric Paris * filesystem level, though. 3272b09e0fa4SEric Paris */ 3273b09e0fa4SEric Paris 32746d9d88d0SJarkko Sakkinen /* 32756d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
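 *
 * (Editor's sketch of the contract: the LSM passes an array terminated
 * by a NULL name, conceptually
 *
 *	struct xattr xattr_array[] = {
 *		{ .name = "selinux", .value = ctx, .value_len = ctx_len },
 *		{ .name = NULL },
 *	};
 *
 * and each entry is re-prefixed with "security." and stored below.)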
32766d9d88d0SJarkko Sakkinen */ 32776d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 32786d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 32796d9d88d0SJarkko Sakkinen void *fs_info) 32806d9d88d0SJarkko Sakkinen { 32816d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 32826d9d88d0SJarkko Sakkinen const struct xattr *xattr; 328338f38657SAristeu Rozanski struct simple_xattr *new_xattr; 32846d9d88d0SJarkko Sakkinen size_t len; 32856d9d88d0SJarkko Sakkinen 32866d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 328738f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 32886d9d88d0SJarkko Sakkinen if (!new_xattr) 32896d9d88d0SJarkko Sakkinen return -ENOMEM; 32906d9d88d0SJarkko Sakkinen 32916d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 32926d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 32936d9d88d0SJarkko Sakkinen GFP_KERNEL); 32946d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 32953bef735aSChengguang Xu kvfree(new_xattr); 32966d9d88d0SJarkko Sakkinen return -ENOMEM; 32976d9d88d0SJarkko Sakkinen } 32986d9d88d0SJarkko Sakkinen 32996d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 33006d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 33016d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 33026d9d88d0SJarkko Sakkinen xattr->name, len); 33036d9d88d0SJarkko Sakkinen 33043b4c7bc0SChristian Brauner simple_xattr_add(&info->xattrs, new_xattr); 33056d9d88d0SJarkko Sakkinen } 33066d9d88d0SJarkko Sakkinen 33076d9d88d0SJarkko Sakkinen return 0; 33086d9d88d0SJarkko Sakkinen } 33096d9d88d0SJarkko Sakkinen 3310aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3311b296821aSAl Viro struct dentry *unused, struct inode *inode, 3312b296821aSAl Viro const char *name, void *buffer, size_t size) 3313aa7c5241SAndreas Gruenbacher { 3314b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3315aa7c5241SAndreas Gruenbacher 3316aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3317aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3318aa7c5241SAndreas Gruenbacher } 3319aa7c5241SAndreas Gruenbacher 3320aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 3321e65ce2a5SChristian Brauner struct user_namespace *mnt_userns, 332259301226SAl Viro struct dentry *unused, struct inode *inode, 332359301226SAl Viro const char *name, const void *value, 332459301226SAl Viro size_t size, int flags) 3325aa7c5241SAndreas Gruenbacher { 332659301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 332736f05cabSJeff Layton int err; 3328aa7c5241SAndreas Gruenbacher 3329aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 333036f05cabSJeff Layton err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); 333136f05cabSJeff Layton if (!err) { 333236f05cabSJeff Layton inode->i_ctime = current_time(inode); 333336f05cabSJeff Layton inode_inc_iversion(inode); 333436f05cabSJeff Layton } 333536f05cabSJeff Layton return err; 3336aa7c5241SAndreas Gruenbacher } 3337aa7c5241SAndreas Gruenbacher 3338aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3339aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3340aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3341aa7c5241SAndreas 
static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */
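/*
 * Illustrative userspace sketch (not part of this file): the handlers above
 * serve the xattr syscalls for the "security." and "trusted." namespaces,
 * so a sufficiently privileged process (CAP_SYS_ADMIN for "trusted.") can:
 *
 *	#include <sys/xattr.h>
 *
 *	setxattr("/dev/shm/f", "trusted.tag", "v1", 2, XATTR_CREATE);
 *
 *	char buf[16];
 *	ssize_t n = getxattr("/dev/shm/f", "trusted.tag", buf, sizeof(buf));
 *
 * Names outside the registered prefixes are rejected by the VFS before
 * these callbacks are reached.
 */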
static const struct inode_operations shmem_short_symlink_operations = {
	.getattr	= shmem_getattr,
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.getattr	= shmem_getattr,
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

/* Find any alias of inode, but prefer a hashed alias */
static struct dentry *shmem_find_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	return alias ?: d_find_any_alias(inode);
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = shmem_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
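/*
 * Worked example (annotation, not kernel code): shmem_encode_fh() packs a
 * 3-word file handle as { generation, ino low 32 bits, ino high 32 bits },
 * and shmem_match()/shmem_fh_to_dentry() reverse it:
 *
 *	__u32 fh[3] = { inode->i_generation, inode->i_ino,
 *			((__u64)inode->i_ino) >> 32 };
 *	__u64 inum = ((__u64)fh[2] << 32) | fh[1];	// full inode number
 *
 * The hash key passed to ilookup5() is (inum + generation), matching the
 * value used for __insert_inode_hash() above.
 */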
enum shmem_param {
	Opt_gid,
	Opt_huge,
	Opt_mode,
	Opt_mpol,
	Opt_nr_blocks,
	Opt_nr_inodes,
	Opt_size,
	Opt_uid,
	Opt_inode32,
	Opt_inode64,
};

static const struct constant_table shmem_param_enums_huge[] = {
	{"never",	SHMEM_HUGE_NEVER },
	{"always",	SHMEM_HUGE_ALWAYS },
	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{"advise",	SHMEM_HUGE_ADVISE },
	{}
};

const struct fs_parameter_spec shmem_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("mpol",		Opt_mpol),
	fsparam_string("nr_blocks",	Opt_nr_blocks),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	fsparam_flag  ("inode32",	Opt_inode32),
	fsparam_flag  ("inode64",	Opt_inode64),
	{}
};
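/*
 * Usage example (illustrative, values arbitrary): the table above accepts
 * mount options such as
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777,huge=within_size \
 *		tmpfs /mnt/tmp
 *
 * "size" may be given in bytes, with k/m/g suffixes, or as a percentage of
 * physical RAM; "huge" takes one of the strings listed in
 * shmem_param_enums_huge above.
 */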
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
	struct shmem_options *ctx = fc->fs_private;
	struct fs_parse_result result;
	unsigned long long size;
	char *rest;
	int opt;

	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest == '%') {
			size <<= PAGE_SHIFT;
			size *= totalram_pages();
			do_div(size, 100);
			rest++;
		}
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_blocks:
		ctx->blocks = memparse(param->string, &rest);
		if (*rest || ctx->blocks > S64_MAX)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_inodes:
		ctx->inodes = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_INODES;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & 07777;
		break;
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_value;
		break;
	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_value;
		break;
	case Opt_huge:
		ctx->huge = result.uint_32;
		if (ctx->huge != SHMEM_HUGE_NEVER &&
		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		      has_transparent_hugepage()))
			goto unsupported_parameter;
		ctx->seen |= SHMEM_SEEN_HUGE;
		break;
	case Opt_mpol:
		if (IS_ENABLED(CONFIG_NUMA)) {
			mpol_put(ctx->mpol);
			ctx->mpol = NULL;
			if (mpol_parse_str(param->string, &ctx->mpol))
				goto bad_value;
			break;
		}
		goto unsupported_parameter;
	case Opt_inode32:
		ctx->full_inums = false;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	case Opt_inode64:
		if (sizeof(ino_t) < 8) {
			return invalfc(fc,
				       "Cannot use inode64 with <64bit inums in kernel\n");
		}
		ctx->full_inums = true;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	}
	return 0;

unsupported_parameter:
	return invalfc(fc, "Unsupported parameter '%s'", param->key);
bad_value:
	return invalfc(fc, "Bad value for '%s'", param->key);
}
static int shmem_parse_options(struct fs_context *fc, void *data)
{
	char *options = data;

	if (options) {
		int err = security_sb_eat_lsm_opts(options, &fc->security);
		if (err)
			return err;
	}

	while (options != NULL) {
		char *this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char) {
			char *value = strchr(this_char, '=');
			size_t len = 0;
			int err;

			if (value) {
				*value++ = '\0';
				len = strlen(value);
			}
			err = vfs_parse_fs_string(fc, this_char, value, len);
			if (err < 0)
				return err;
		}
	}
	return 0;
}
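/*
 * Worked example (annotation): for data = "mpol=bind:0,2,size=1g" the scan
 * loop above leaves the first comma alone, because it is followed by the
 * digit '2' (part of mpol's nodelist), and splits only at the second comma,
 * yielding the two options "mpol=bind:0,2" and "size=1g". A nodelist
 * followed by an option name starting with a digit would defeat this
 * heuristic, but no such option name exists.
 */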
/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is already in use.
 */
static int shmem_reconfigure(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
	unsigned long inodes;
	struct mempolicy *mpol = NULL;
	const char *err;

	raw_spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;

	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
		if (!sbinfo->max_blocks) {
			err = "Cannot retroactively limit size";
			goto out;
		}
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   ctx->blocks) > 0) {
			err = "Too small a size for current use";
			goto out;
		}
	}
	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
		if (!sbinfo->max_inodes) {
			err = "Cannot retroactively limit inodes";
			goto out;
		}
		if (ctx->inodes < inodes) {
			err = "Too few inodes for current use";
			goto out;
		}
	}

	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
	    sbinfo->next_ino > UINT_MAX) {
		err = "Current inum too high to switch to 32-bit inums";
		goto out;
	}

	if (ctx->seen & SHMEM_SEEN_HUGE)
		sbinfo->huge = ctx->huge;
	if (ctx->seen & SHMEM_SEEN_INUMS)
		sbinfo->full_inums = ctx->full_inums;
	if (ctx->seen & SHMEM_SEEN_BLOCKS)
		sbinfo->max_blocks = ctx->blocks;
	if (ctx->seen & SHMEM_SEEN_INODES) {
		sbinfo->max_inodes = ctx->inodes;
		sbinfo->free_inodes = ctx->inodes - inodes;
	}

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (ctx->mpol) {
		mpol = sbinfo->mpol;
		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
		ctx->mpol = NULL;
	}
	raw_spin_unlock(&sbinfo->stat_lock);
	mpol_put(mpol);
	return 0;
out:
	raw_spin_unlock(&sbinfo->stat_lock);
	return invalfc(fc, "%s", err);
}
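/*
 * Usage example (illustrative): the checks above govern remounts such as
 *
 *	mount -o remount,size=2g /mnt/tmp
 *
 * A mount that is currently unlimited cannot retroactively be limited
 * ("Cannot retroactively limit size"), and a new limit below current
 * usage ("Too small a size for current use") is rejected rather than
 * applied.
 */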
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (0777 | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));

	/*
	 * Showing inode{64,32} might be useful even if it's the system default,
	 * since then people don't have to resort to checking both here and
	 * /proc/config.gz to confirm 64-bit inums were successfully applied
	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
	 *
	 * We hide it when inode64 isn't the default and we are using 32-bit
	 * inodes, since that probably just means the feature isn't even under
	 * consideration.
	 *
	 * As such:
	 *
	 *                     +-----------------+-----------------+
	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
	 *  +------------------+-----------------+-----------------+
	 *  | full_inums=true  | show            | show            |
	 *  | full_inums=false | show            | hide            |
	 *  +------------------+-----------------+-----------------+
	 *
	 */
	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}

#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	free_percpu(sbinfo->ino_batch);
	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}
static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct inode *inode;
	struct shmem_sb_info *sbinfo;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we allow only half of the physical RAM per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
			ctx->blocks = shmem_default_max_blocks();
		if (!(ctx->seen & SHMEM_SEEN_INODES))
			ctx->inodes = shmem_default_max_inodes();
		if (!(ctx->seen & SHMEM_SEEN_INUMS))
			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
#else
	sb->s_flags |= SB_NOUSER;
#endif
	sbinfo->max_blocks = ctx->blocks;
	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
	if (sb->s_flags & SB_KERNMOUNT) {
		sbinfo->ino_batch = alloc_percpu(ino_t);
		if (!sbinfo->ino_batch)
			goto failed;
	}
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->full_inums = ctx->full_inums;
	sbinfo->mode = ctx->mode;
	sbinfo->huge = ctx->huge;
	sbinfo->mpol = ctx->mpol;
	ctx->mpol = NULL;

	raw_spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return -ENOMEM;
}
static int shmem_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;

	if (ctx) {
		mpol_put(ctx->mpol);
		kfree(ctx);
	}
}

static const struct fs_context_operations shmem_fs_context_ops = {
	.free			= shmem_free_fc,
	.get_tree		= shmem_get_tree,
#ifdef CONFIG_TMPFS
	.parse_monolithic	= shmem_parse_options,
	.parse_param		= shmem_parse_one,
	.reconfigure		= shmem_reconfigure,
#endif
};
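/*
 * Illustrative userspace sketch (not part of this file, assumes a libc
 * exposing the new mount API syscalls): the fs_context ops above are what
 * fsopen()/fsconfig() drive, e.g.:
 *
 *	int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
 *
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "1g", 0);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "huge", "advise", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt/tmp", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * Each FSCONFIG_SET_STRING call lands in shmem_parse_one() via
 * shmem_fs_context_ops.parse_param; a classic mount(2) with an option
 * string goes through shmem_parse_options() instead.
 */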
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

/* Keep the page in page cache instead of truncating it */
static int shmem_error_remove_page(struct address_space *mapping,
				   struct page *page)
{
	return 0;
}

const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
	.error_remove_page = shmem_error_remove_page,
};
EXPORT_SYMBOL(shmem_aops);

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.open		= generic_file_open,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.getattr	= shmem_getattr,
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
	.getattr	= shmem_getattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};
Shutemov #endif 40171da177e4SLinus Torvalds }; 40181da177e4SLinus Torvalds 4019f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 402054cb8821SNick Piggin .fault = shmem_fault, 4021d7c17551SNing Qu .map_pages = filemap_map_pages, 40221da177e4SLinus Torvalds #ifdef CONFIG_NUMA 40231da177e4SLinus Torvalds .set_policy = shmem_set_policy, 40241da177e4SLinus Torvalds .get_policy = shmem_get_policy, 40251da177e4SLinus Torvalds #endif 40261da177e4SLinus Torvalds }; 40271da177e4SLinus Torvalds 4028d09e8ca6SPasha Tatashin static const struct vm_operations_struct shmem_anon_vm_ops = { 4029d09e8ca6SPasha Tatashin .fault = shmem_fault, 4030d09e8ca6SPasha Tatashin .map_pages = filemap_map_pages, 4031d09e8ca6SPasha Tatashin #ifdef CONFIG_NUMA 4032d09e8ca6SPasha Tatashin .set_policy = shmem_set_policy, 4033d09e8ca6SPasha Tatashin .get_policy = shmem_get_policy, 4034d09e8ca6SPasha Tatashin #endif 4035d09e8ca6SPasha Tatashin }; 4036d09e8ca6SPasha Tatashin 4037f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc) 40381da177e4SLinus Torvalds { 4039f3235626SDavid Howells struct shmem_options *ctx; 4040f3235626SDavid Howells 4041f3235626SDavid Howells ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 4042f3235626SDavid Howells if (!ctx) 4043f3235626SDavid Howells return -ENOMEM; 4044f3235626SDavid Howells 4045f3235626SDavid Howells ctx->mode = 0777 | S_ISVTX; 4046f3235626SDavid Howells ctx->uid = current_fsuid(); 4047f3235626SDavid Howells ctx->gid = current_fsgid(); 4048f3235626SDavid Howells 4049f3235626SDavid Howells fc->fs_private = ctx; 4050f3235626SDavid Howells fc->ops = &shmem_fs_context_ops; 4051f3235626SDavid Howells return 0; 40521da177e4SLinus Torvalds } 40531da177e4SLinus Torvalds 405441ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 40551da177e4SLinus Torvalds .owner = THIS_MODULE, 40561da177e4SLinus Torvalds .name = "tmpfs", 4057f3235626SDavid Howells .init_fs_context = shmem_init_fs_context, 4058f3235626SDavid Howells #ifdef CONFIG_TMPFS 4059d7167b14SAl Viro .parameters = shmem_fs_parameters, 4060f3235626SDavid Howells #endif 40611da177e4SLinus Torvalds .kill_sb = kill_litter_super, 4062ff36da69SMatthew Wilcox (Oracle) .fs_flags = FS_USERNS_MOUNT, 40631da177e4SLinus Torvalds }; 40641da177e4SLinus Torvalds 40659096bbe9SMiaohe Lin void __init shmem_init(void) 40661da177e4SLinus Torvalds { 40671da177e4SLinus Torvalds int error; 40681da177e4SLinus Torvalds 40699a8ec03eSweiping zhang shmem_init_inodecache(); 40701da177e4SLinus Torvalds 407141ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 40721da177e4SLinus Torvalds if (error) { 40731170532bSJoe Perches pr_err("Could not register tmpfs\n"); 40741da177e4SLinus Torvalds goto out2; 40751da177e4SLinus Torvalds } 407695dc112aSGreg Kroah-Hartman 4077ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 40781da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 40791da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 40801170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 40811da177e4SLinus Torvalds goto out1; 40821da177e4SLinus Torvalds } 40835a6e75f8SKirill A. Shutemov 4084396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4085435c0b87SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 40865a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 40875a6e75f8SKirill A. Shutemov else 40885e6e5a12SHugh Dickins shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ 40895a6e75f8SKirill A. 
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	static const int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(values); i++) {
		len += sysfs_emit_at(buf, len,
				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
				     i ? " " : "",
				     shmem_format_huge(values[i]));
	}

	len += sysfs_emit_at(buf, len, "\n");

	return len;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
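/*
 * Usage example (illustrative): with CONFIG_TRANSPARENT_HUGEPAGE and
 * CONFIG_SYSFS, the attribute above is exposed as
 *
 *	/sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * so an administrator can, for instance:
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" are intended for testing: they also retune the
 * internal shm mount, as shmem_enabled_store() shows.
 */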
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_anon_vm_ops			generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */
static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}
/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode, so users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations: LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
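/*
 * Illustrative in-kernel usage sketch (not from this file): a driver that
 * wants an unlinked, swappable backing file might do:
 *
 *	struct file *filp = shmem_file_setup("my-backing", SZ_4M, VM_NORESERVE);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);	// the inode is already unlinked; the final
 *			// put releases it and frees its pages
 *
 * drm_gem_object_init() uses this call to provide the shmem backing
 * store for GEM objects.
 */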
Shutemov 43201da177e4SLinus Torvalds return 0; 43211da177e4SLinus Torvalds } 4322d9d90e5eSHugh Dickins 4323d9d90e5eSHugh Dickins /** 4324d9d90e5eSHugh Dickins * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 4325d9d90e5eSHugh Dickins * @mapping: the page's address_space 4326d9d90e5eSHugh Dickins * @index: the page index 4327d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating 4328d9d90e5eSHugh Dickins * 4329d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 4330d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags. 43317e0a1265SMatthew Wilcox (Oracle) * But read_cache_page_gfp() uses the ->read_folio() method: which does not 4332d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those 4333d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 4334d9d90e5eSHugh Dickins * 433568da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 433668da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 4337d9d90e5eSHugh Dickins */ 4338d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 4339d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp) 4340d9d90e5eSHugh Dickins { 434168da9f05SHugh Dickins #ifdef CONFIG_SHMEM 434268da9f05SHugh Dickins struct inode *inode = mapping->host; 4343a3a9c397SMatthew Wilcox (Oracle) struct folio *folio; 43449276aad6SHugh Dickins struct page *page; 434568da9f05SHugh Dickins int error; 434668da9f05SHugh Dickins 434730e6a51dSHui Su BUG_ON(!shmem_mapping(mapping)); 4348a3a9c397SMatthew Wilcox (Oracle) error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, 4349cfda0526SMike Rapoport gfp, NULL, NULL, NULL); 435068da9f05SHugh Dickins if (error) 4351a7605426SYang Shi return ERR_PTR(error); 4352a7605426SYang Shi 4353a3a9c397SMatthew Wilcox (Oracle) folio_unlock(folio); 4354a3a9c397SMatthew Wilcox (Oracle) page = folio_file_page(folio, index); 4355a7605426SYang Shi if (PageHWPoison(page)) { 4356a3a9c397SMatthew Wilcox (Oracle) folio_put(folio); 4357a7605426SYang Shi return ERR_PTR(-EIO); 4358a7605426SYang Shi } 4359a7605426SYang Shi 436068da9f05SHugh Dickins return page; 436168da9f05SHugh Dickins #else 436268da9f05SHugh Dickins /* 436368da9f05SHugh Dickins * The tiny !SHMEM case uses ramfs without swap 436468da9f05SHugh Dickins */ 4365d9d90e5eSHugh Dickins return read_cache_page_gfp(mapping, index, gfp); 436668da9f05SHugh Dickins #endif 4367d9d90e5eSHugh Dickins } 4368d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 4369