/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif
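/*
 * Worked example (illustrative): with 4K pages and 16GiB of RAM (no
 * highmem), totalram_pages() is 4M, so an unsized tmpfs mount defaults
 * to 2M blocks (8GiB of data) and 2M inodes.
 */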
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			      struct folio **foliop, enum sgp_type sgp,
			      gfp_t gfp, struct vm_area_struct *vma,
			      vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
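/*
 * Worked example (illustrative): with 4K pages, VM_ACCT(5000) == 2, so
 * creating a 5000-byte shmem object without VM_NORESERVE pre-charges
 * two pages of commit; growing it to 9000 bytes via shmem_reacct_size()
 * charges just the one-page difference.
 */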
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
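/*
 * Note: two limits apply to each allocation - the per-object VM commit
 * charge above (only taken for VM_NORESERVE objects, since others were
 * pre-accounted in full at setup), and the per-superblock used_blocks
 * percpu counter checked against the mount's block limit. If either is
 * exhausted, shmem_inode_acct_block() fails, which callers report as
 * -ENOSPC rather than -ENOMEM.
 */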
static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
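/*
 * Illustrative walk-through of the batched path above: the first
 * allocation on a CPU sees *next_ino % SHMEM_INO_BATCH == 0, takes
 * stat_lock once, and claims inos [N, N+1024) for that CPU; the
 * following 1023 allocations on the same CPU then proceed without
 * taking any lock at all.
 */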
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
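/*
 * Worked example: if info->alloced is 10 while 6 pages remain in the
 * page cache and 2 are swapped out, the mm must have dropped 2 clean
 * hole pages behind our back; shmem_recalc_inode() lowers alloced to 8
 * and returns those 2 pages' worth of block accounting.
 */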
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
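/*
 * Note on the check above: swap entries are stored in the page cache
 * xarray as "value" entries (tagged pointers rather than folio
 * pointers), so comparing the raw xa_load() result against the encoded
 * swap entry is sufficient, and can be done without taking xa_lock.
 */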
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}
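/*
 * Typical usage (illustrative): a mount like
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * sets the per-superblock SHMEM_SB(sb)->huge policy, while
 *	echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * sets the global shmem_huge override that is checked first above.
 */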
#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}
static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
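/*
 * The two helpers above serve as the superblock's cached-object
 * callbacks (nr_cached_objects/free_cached_objects in shmem_ops): under
 * memory pressure the sb shrinker first asks how many inodes still keep
 * a huge folio straddling EOF, then splits those folios so the pages
 * beyond i_size can be reclaimed.
 */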
/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}
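/*
 * The do/while above is the standard xarray insertion idiom: the store
 * is attempted under xa_lock, and if it failed with -ENOMEM, xas_nomem()
 * preallocates a node outside the lock and the loop retries; any other
 * xas error (here -EEXIST, when a conflicting entry other than the
 * expected one is found) terminates the loop and is returned.
 */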
/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
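/*
 * Each value entry found stands for exactly one swapped-out base page,
 * so, for example, 3 swap entries in the range yield a return value of
 * 3 << PAGE_SHIFT = 12288 bytes with 4K pages.
 */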
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated folios as holes.
	 */
	folio = filemap_get_entry(inode->i_mapping, index);
	if (!folio)
		return folio;
	if (!xa_is_value(folio)) {
		folio_lock(folio);
		if (folio->mapping == inode->i_mapping)
			return folio;
		/* The folio has been swapped out */
		folio_unlock(folio);
		folio_put(folio);
	}
	/*
	 * But read a folio back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}
/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, indices[i], folio)) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);
				truncate_inode_folio(mapping, folio);
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}
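/*
 * Overview of the above: a first pass uses find_lock_entries() to
 * cheaply drop the whole folios and swap entries it can trylock; the
 * partial first/last folios are then zeroed or split; and a second pass
 * with find_get_entries() sleeps on folio locks and retries until every
 * entry in the range is truly gone, restarting on races where a page
 * and its swap entry replace each other.
 */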
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
Shutemov spin_unlock_irq(&info->lock); 1067d0424c42SHugh Dickins } 1068e408e695STheodore Ts'o if (info->fsflags & FS_APPEND_FL) 1069e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_APPEND; 1070e408e695STheodore Ts'o if (info->fsflags & FS_IMMUTABLE_FL) 1071e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_IMMUTABLE; 1072e408e695STheodore Ts'o if (info->fsflags & FS_NODUMP_FL) 1073e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_NODUMP; 1074e408e695STheodore Ts'o stat->attributes_mask |= (STATX_ATTR_APPEND | 1075e408e695STheodore Ts'o STATX_ATTR_IMMUTABLE | 1076e408e695STheodore Ts'o STATX_ATTR_NODUMP); 10777a80e5b8SGiuseppe Scrivano generic_fillattr(idmap, inode, stat); 107889fdcd26SYang Shi 10792cf13384SDavid Stevens if (shmem_is_huge(inode, 0, false, NULL, 0)) 108089fdcd26SYang Shi stat->blksize = HPAGE_PMD_SIZE; 108189fdcd26SYang Shi 1082f7cd16a5SXavier Roche if (request_mask & STATX_BTIME) { 1083f7cd16a5SXavier Roche stat->result_mask |= STATX_BTIME; 1084f7cd16a5SXavier Roche stat->btime.tv_sec = info->i_crtime.tv_sec; 1085f7cd16a5SXavier Roche stat->btime.tv_nsec = info->i_crtime.tv_nsec; 1086f7cd16a5SXavier Roche } 1087f7cd16a5SXavier Roche 108844a30220SYu Zhao return 0; 108944a30220SYu Zhao } 109044a30220SYu Zhao 1091c1632a0fSChristian Brauner static int shmem_setattr(struct mnt_idmap *idmap, 1092549c7297SChristian Brauner struct dentry *dentry, struct iattr *attr) 10931da177e4SLinus Torvalds { 109475c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 109540e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 10961da177e4SLinus Torvalds int error; 109736f05cabSJeff Layton bool update_mtime = false; 109836f05cabSJeff Layton bool update_ctime = true; 10991da177e4SLinus Torvalds 11007a80e5b8SGiuseppe Scrivano error = setattr_prepare(idmap, dentry, attr); 1101db78b877SChristoph Hellwig if (error) 1102db78b877SChristoph Hellwig return error; 1103db78b877SChristoph Hellwig 11046fd73538SDaniel Verkamp if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { 11056fd73538SDaniel Verkamp if ((inode->i_mode ^ attr->ia_mode) & 0111) { 11066fd73538SDaniel Verkamp return -EPERM; 11076fd73538SDaniel Verkamp } 11086fd73538SDaniel Verkamp } 11096fd73538SDaniel Verkamp 111094c1e62dSHugh Dickins if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 111194c1e62dSHugh Dickins loff_t oldsize = inode->i_size; 111294c1e62dSHugh Dickins loff_t newsize = attr->ia_size; 11133889e6e7Snpiggin@suse.de 11149608703eSJan Kara /* protected by i_rwsem */ 111540e041a2SDavid Herrmann if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 111640e041a2SDavid Herrmann (newsize > oldsize && (info->seals & F_SEAL_GROW))) 111740e041a2SDavid Herrmann return -EPERM; 111840e041a2SDavid Herrmann 111994c1e62dSHugh Dickins if (newsize != oldsize) { 112077142517SKonstantin Khlebnikov error = shmem_reacct_size(SHMEM_I(inode)->flags, 112177142517SKonstantin Khlebnikov oldsize, newsize); 112277142517SKonstantin Khlebnikov if (error) 112377142517SKonstantin Khlebnikov return error; 112494c1e62dSHugh Dickins i_size_write(inode, newsize); 112536f05cabSJeff Layton update_mtime = true; 112636f05cabSJeff Layton } else { 112736f05cabSJeff Layton update_ctime = false; 112894c1e62dSHugh Dickins } 1129afa2db2fSJosef Bacik if (newsize <= oldsize) { 113094c1e62dSHugh Dickins loff_t holebegin = round_up(newsize, PAGE_SIZE); 1131d0424c42SHugh Dickins if (oldsize > holebegin) 1132d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1133d0424c42SHugh Dickins holebegin, 0, 1); 
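			/*
			 * Note: a length of 0 makes unmap_mapping_range()
			 * sweep from holebegin all the way to EOF, and the
			 * final argument (even_cows == 1) drops private COW
			 * copies as well.  For illustration, assuming 4KiB
			 * pages, a newsize of 5000 rounds holebegin up to
			 * 8192: the partially valid page at offset 4096
			 * stays mapped, and only whole pages from 8192 on
			 * are unmapped before being truncated below.
			 */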
1134d0424c42SHugh Dickins if (info->alloced) 1135d0424c42SHugh Dickins shmem_truncate_range(inode, 1136d0424c42SHugh Dickins newsize, (loff_t)-1); 113794c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */ 1138d0424c42SHugh Dickins if (oldsize > holebegin) 1139d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1140d0424c42SHugh Dickins holebegin, 0, 1); 114194c1e62dSHugh Dickins } 11421da177e4SLinus Torvalds } 11431da177e4SLinus Torvalds 11447a80e5b8SGiuseppe Scrivano setattr_copy(idmap, inode, attr); 1145db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE) 11467a80e5b8SGiuseppe Scrivano error = posix_acl_chmod(idmap, dentry, inode->i_mode); 114736f05cabSJeff Layton if (!error && update_ctime) { 114836f05cabSJeff Layton inode->i_ctime = current_time(inode); 114936f05cabSJeff Layton if (update_mtime) 115036f05cabSJeff Layton inode->i_mtime = inode->i_ctime; 115136f05cabSJeff Layton inode_inc_iversion(inode); 115236f05cabSJeff Layton } 11531da177e4SLinus Torvalds return error; 11541da177e4SLinus Torvalds } 11551da177e4SLinus Torvalds 11561f895f75SAl Viro static void shmem_evict_inode(struct inode *inode) 11571da177e4SLinus Torvalds { 11581da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 1159779750d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 11601da177e4SLinus Torvalds 116130e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) { 11621da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size); 11631da177e4SLinus Torvalds inode->i_size = 0; 1164bc786390SHugh Dickins mapping_set_exiting(inode->i_mapping); 11653889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1); 1166779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1167779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1168779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1169779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 1170779750d2SKirill A. Shutemov sbinfo->shrinklist_len--; 1171779750d2SKirill A. Shutemov } 1172779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1173779750d2SKirill A. Shutemov } 1174af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) { 1175af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... 
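 * shmem_unuse() raises info->stop_eviction before dropping
 * shmem_swaplist_mutex to scan this inode, and wakes the var when it
 * drops the count again: wait_var_event() below therefore sleeps
 * until no such scan is in flight before eviction proceeds.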
*/ 1176af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction, 1177af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction)); 1178cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1179af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */ 1180af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction)) 11811da177e4SLinus Torvalds list_del_init(&info->swaplist); 1182cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 11831da177e4SLinus Torvalds } 11843ed47db3SAl Viro } 1185b09e0fa4SEric Paris 118638f38657SAristeu Rozanski simple_xattrs_free(&info->xattrs); 11870f3c42f5SHugh Dickins WARN_ON(inode->i_blocks); 11885b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 1189dbd5768fSJan Kara clear_inode(inode); 11901da177e4SLinus Torvalds } 11911da177e4SLinus Torvalds 1192b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping, 1193da08e9b7SMatthew Wilcox (Oracle) pgoff_t start, struct folio_batch *fbatch, 1194da08e9b7SMatthew Wilcox (Oracle) pgoff_t *indices, unsigned int type) 1195478922e2SMatthew Wilcox { 1196b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start); 1197da08e9b7SMatthew Wilcox (Oracle) struct folio *folio; 119887039546SHugh Dickins swp_entry_t entry; 1199478922e2SMatthew Wilcox 1200478922e2SMatthew Wilcox rcu_read_lock(); 1201da08e9b7SMatthew Wilcox (Oracle) xas_for_each(&xas, folio, ULONG_MAX) { 1202da08e9b7SMatthew Wilcox (Oracle) if (xas_retry(&xas, folio)) 12035b9c98f3SMike Kravetz continue; 1204b56a2d8aSVineeth Remanan Pillai 1205da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1206478922e2SMatthew Wilcox continue; 1207b56a2d8aSVineeth Remanan Pillai 1208da08e9b7SMatthew Wilcox (Oracle) entry = radix_to_swp_entry(folio); 12096cec2b95SMiaohe Lin /* 12106cec2b95SMiaohe Lin * swapin error entries can be found in the mapping. But they're 12116cec2b95SMiaohe Lin * deliberately ignored here as we've done everything we can do. 12126cec2b95SMiaohe Lin */ 121387039546SHugh Dickins if (swp_type(entry) != type) 1214b56a2d8aSVineeth Remanan Pillai continue; 1215b56a2d8aSVineeth Remanan Pillai 1216e384200eSHugh Dickins indices[folio_batch_count(fbatch)] = xas.xa_index; 1217da08e9b7SMatthew Wilcox (Oracle) if (!folio_batch_add(fbatch, folio)) 1218da08e9b7SMatthew Wilcox (Oracle) break; 1219b56a2d8aSVineeth Remanan Pillai 1220b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1221e21a2955SMatthew Wilcox xas_pause(&xas); 1222478922e2SMatthew Wilcox cond_resched_rcu(); 1223478922e2SMatthew Wilcox } 1224b56a2d8aSVineeth Remanan Pillai } 1225478922e2SMatthew Wilcox rcu_read_unlock(); 1226e21a2955SMatthew Wilcox 1227da08e9b7SMatthew Wilcox (Oracle) return xas.xa_index; 1228b56a2d8aSVineeth Remanan Pillai } 1229b56a2d8aSVineeth Remanan Pillai 1230b56a2d8aSVineeth Remanan Pillai /* 1231b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1232b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 
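 * Note that only -ENOMEM ends the batch early; any other per-entry
 * error is cleared, so a single bad entry cannot stall the rest of
 * the swapoff scan.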
1233b56a2d8aSVineeth Remanan Pillai */ 1234da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode, 1235da08e9b7SMatthew Wilcox (Oracle) struct folio_batch *fbatch, pgoff_t *indices) 1236b56a2d8aSVineeth Remanan Pillai { 1237b56a2d8aSVineeth Remanan Pillai int i = 0; 1238b56a2d8aSVineeth Remanan Pillai int ret = 0; 1239b56a2d8aSVineeth Remanan Pillai int error = 0; 1240b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1241b56a2d8aSVineeth Remanan Pillai 1242da08e9b7SMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(fbatch); i++) { 1243da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = fbatch->folios[i]; 1244b56a2d8aSVineeth Remanan Pillai 1245da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1246b56a2d8aSVineeth Remanan Pillai continue; 1247da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, indices[i], 1248da08e9b7SMatthew Wilcox (Oracle) &folio, SGP_CACHE, 1249b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1250b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1251b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1252da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1253da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1254b56a2d8aSVineeth Remanan Pillai ret++; 1255b56a2d8aSVineeth Remanan Pillai } 1256b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1257b56a2d8aSVineeth Remanan Pillai break; 1258b56a2d8aSVineeth Remanan Pillai error = 0; 1259b56a2d8aSVineeth Remanan Pillai } 1260b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1261478922e2SMatthew Wilcox } 1262478922e2SMatthew Wilcox 126346f65ec1SHugh Dickins /* 126446f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 126546f65ec1SHugh Dickins */ 126610a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type) 12671da177e4SLinus Torvalds { 1268b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1269b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0; 1270da08e9b7SMatthew Wilcox (Oracle) struct folio_batch fbatch; 1271b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE]; 1272b56a2d8aSVineeth Remanan Pillai int ret = 0; 12731da177e4SLinus Torvalds 1274b56a2d8aSVineeth Remanan Pillai do { 1275da08e9b7SMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 1276da08e9b7SMatthew Wilcox (Oracle) shmem_find_swap_entries(mapping, start, &fbatch, indices, type); 1277da08e9b7SMatthew Wilcox (Oracle) if (folio_batch_count(&fbatch) == 0) { 1278b56a2d8aSVineeth Remanan Pillai ret = 0; 1279778dd893SHugh Dickins break; 1280b56a2d8aSVineeth Remanan Pillai } 1281b56a2d8aSVineeth Remanan Pillai 1282da08e9b7SMatthew Wilcox (Oracle) ret = shmem_unuse_swap_entries(inode, &fbatch, indices); 1283b56a2d8aSVineeth Remanan Pillai if (ret < 0) 1284b56a2d8aSVineeth Remanan Pillai break; 1285b56a2d8aSVineeth Remanan Pillai 1286da08e9b7SMatthew Wilcox (Oracle) start = indices[folio_batch_count(&fbatch) - 1]; 1287b56a2d8aSVineeth Remanan Pillai } while (true); 1288b56a2d8aSVineeth Remanan Pillai 1289b56a2d8aSVineeth Remanan Pillai return ret; 1290b56a2d8aSVineeth Remanan Pillai } 1291b56a2d8aSVineeth Remanan Pillai 1292b56a2d8aSVineeth Remanan Pillai /* 1293b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap 1294b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be 1295b56a2d8aSVineeth Remanan Pillai * unused. 
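 * This is the tmpfs half of swapoff: try_to_unuse() deals with
 * anonymous memory, and calls here so that each inode on
 * shmem_swaplist gets its swap entries pulled back into the page
 * cache via shmem_unuse_inode().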
1296b56a2d8aSVineeth Remanan Pillai */ 129710a9c496SChristoph Hellwig int shmem_unuse(unsigned int type) 1298b56a2d8aSVineeth Remanan Pillai { 1299b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1300b56a2d8aSVineeth Remanan Pillai int error = 0; 1301b56a2d8aSVineeth Remanan Pillai 1302b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1303b56a2d8aSVineeth Remanan Pillai return 0; 1304b56a2d8aSVineeth Remanan Pillai 1305b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1306b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1307b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1308b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1309b56a2d8aSVineeth Remanan Pillai continue; 1310b56a2d8aSVineeth Remanan Pillai } 1311af53d3e9SHugh Dickins /* 1312af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1313af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1314af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1315af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1316af53d3e9SHugh Dickins */ 1317af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1318b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1319b56a2d8aSVineeth Remanan Pillai 132010a9c496SChristoph Hellwig error = shmem_unuse_inode(&info->vfs_inode, type); 1321b56a2d8aSVineeth Remanan Pillai cond_resched(); 1322b56a2d8aSVineeth Remanan Pillai 1323b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1324b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1325b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1326b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1327af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1328af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1329b56a2d8aSVineeth Remanan Pillai if (error) 1330b56a2d8aSVineeth Remanan Pillai break; 13311da177e4SLinus Torvalds } 1332cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1333778dd893SHugh Dickins 1334778dd893SHugh Dickins return error; 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds 13371da177e4SLinus Torvalds /* 13381da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 13391da177e4SLinus Torvalds */ 13401da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 13411da177e4SLinus Torvalds { 1342e2e3fdc7SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 13438ccee8c1SLuis Chamberlain struct address_space *mapping = folio->mapping; 13448ccee8c1SLuis Chamberlain struct inode *inode = mapping->host; 13458ccee8c1SLuis Chamberlain struct shmem_inode_info *info = SHMEM_I(inode); 13462c6efe9cSLuis Chamberlain struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 13476922c0c7SHugh Dickins swp_entry_t swap; 13486922c0c7SHugh Dickins pgoff_t index; 13491da177e4SLinus Torvalds 13501e6decf3SHugh Dickins /* 1351cf7992bfSLuis Chamberlain * Our capabilities prevent regular writeback or sync from ever calling 1352cf7992bfSLuis Chamberlain * shmem_writepage; but a stacking filesystem might use ->writepage of 1353cf7992bfSLuis Chamberlain * its underlying filesystem, in which case tmpfs should write out to 1354cf7992bfSLuis Chamberlain * swap only in response to memory pressure, and not for the writeback 1355cf7992bfSLuis Chamberlain * threads or sync. 
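 * Hence the WARN_ON_ONCE() below: if we get here without
 * wbc->for_reclaim set, some path other than memory reclaim has
 * called us, and the safe response is simply to redirty the folio.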
1356cf7992bfSLuis Chamberlain */ 1357cf7992bfSLuis Chamberlain if (WARN_ON_ONCE(!wbc->for_reclaim)) 1358cf7992bfSLuis Chamberlain goto redirty; 1359cf7992bfSLuis Chamberlain 13602c6efe9cSLuis Chamberlain if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) 13619a976f0cSLuis Chamberlain goto redirty; 13629a976f0cSLuis Chamberlain 13639a976f0cSLuis Chamberlain if (!total_swap_pages) 13649a976f0cSLuis Chamberlain goto redirty; 13659a976f0cSLuis Chamberlain 1366cf7992bfSLuis Chamberlain /* 13671e6decf3SHugh Dickins * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or 13681e6decf3SHugh Dickins * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, 13691e6decf3SHugh Dickins * and its shmem_writeback() needs them to be split when swapping. 13701e6decf3SHugh Dickins */ 1371f530ed0eSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 13721e6decf3SHugh Dickins /* Ensure the subpages are still dirty */ 1373f530ed0eSMatthew Wilcox (Oracle) folio_test_set_dirty(folio); 13741e6decf3SHugh Dickins if (split_huge_page(page) < 0) 13751e6decf3SHugh Dickins goto redirty; 1376f530ed0eSMatthew Wilcox (Oracle) folio = page_folio(page); 1377f530ed0eSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 13781e6decf3SHugh Dickins } 13791e6decf3SHugh Dickins 1380f530ed0eSMatthew Wilcox (Oracle) index = folio->index; 13811da177e4SLinus Torvalds 1382d9fe526aSHugh Dickins /* 13831635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13841635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 1385f530ed0eSMatthew Wilcox (Oracle) * fallocated folio arriving here is now to initialize it and write it. 13861aac1400SHugh Dickins * 1387f530ed0eSMatthew Wilcox (Oracle) * That's okay for a folio already fallocated earlier, but if we have 13881aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 1389f530ed0eSMatthew Wilcox (Oracle) * of this folio in case we have to undo it, and (b) it may not be a 13901aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 1391f530ed0eSMatthew Wilcox (Oracle) * reactivate the folio, and let shmem_fallocate() quit when too many. 
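 * (nr_unswapped is what shmem_fallocate() watches: once reclaim keeps
 * bouncing not-yet-written fallocated folios back like this, it gives
 * up with -ENOMEM rather than keep allocating against the tide.)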
13921635f6a7SHugh Dickins */ 1393f530ed0eSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 13941aac1400SHugh Dickins if (inode->i_private) { 13951aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 13961aac1400SHugh Dickins spin_lock(&inode->i_lock); 13971aac1400SHugh Dickins shmem_falloc = inode->i_private; 13981aac1400SHugh Dickins if (shmem_falloc && 13998e205f77SHugh Dickins !shmem_falloc->waitq && 14001aac1400SHugh Dickins index >= shmem_falloc->start && 14011aac1400SHugh Dickins index < shmem_falloc->next) 14021aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 14031aac1400SHugh Dickins else 14041aac1400SHugh Dickins shmem_falloc = NULL; 14051aac1400SHugh Dickins spin_unlock(&inode->i_lock); 14061aac1400SHugh Dickins if (shmem_falloc) 14071aac1400SHugh Dickins goto redirty; 14081aac1400SHugh Dickins } 1409f530ed0eSMatthew Wilcox (Oracle) folio_zero_range(folio, 0, folio_size(folio)); 1410f530ed0eSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 1411f530ed0eSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 14121635f6a7SHugh Dickins } 14131635f6a7SHugh Dickins 1414e2e3fdc7SMatthew Wilcox (Oracle) swap = folio_alloc_swap(folio); 141548f170fbSHugh Dickins if (!swap.val) 141648f170fbSHugh Dickins goto redirty; 1417d9fe526aSHugh Dickins 1418b1dea800SHugh Dickins /* 1419b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 1420f530ed0eSMatthew Wilcox (Oracle) * if it's not already there. Do it now before the folio is 14216922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1422b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 14236922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 14246922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1425b1dea800SHugh Dickins */ 1426b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 142705bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1428b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1429b1dea800SHugh Dickins 1430a4c366f0SMatthew Wilcox (Oracle) if (add_to_swap_cache(folio, swap, 14313852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 14323852f676SJoonsoo Kim NULL) == 0) { 14334595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1434267a4c76SHugh Dickins shmem_recalc_inode(inode); 1435267a4c76SHugh Dickins info->swapped++; 14364595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1437267a4c76SHugh Dickins 1438aaa46865SHugh Dickins swap_shmem_alloc(swap); 14394cd400fdSMatthew Wilcox (Oracle) shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); 14406922c0c7SHugh Dickins 14416922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1442f530ed0eSMatthew Wilcox (Oracle) BUG_ON(folio_mapped(folio)); 1443f530ed0eSMatthew Wilcox (Oracle) swap_writepage(&folio->page, wbc); 14441da177e4SLinus Torvalds return 0; 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds 14476922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 14484081f744SMatthew Wilcox (Oracle) put_swap_folio(folio, swap); 14491da177e4SLinus Torvalds redirty: 1450f530ed0eSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1451d9fe526aSHugh Dickins if (wbc->for_reclaim) 1452f530ed0eSMatthew Wilcox (Oracle) return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ 1453f530ed0eSMatthew Wilcox (Oracle) folio_unlock(folio); 1454d9fe526aSHugh Dickins return 0; 14551da177e4SLinus Torvalds } 14561da177e4SLinus Torvalds 145775edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 145871fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1459680d794bSakpm@linux-foundation.org { 1460680d794bSakpm@linux-foundation.org char buffer[64]; 1461680d794bSakpm@linux-foundation.org 146271fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1463095f1fc4SLee Schermerhorn return; /* show nothing */ 1464095f1fc4SLee Schermerhorn 1465a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1466095f1fc4SLee Schermerhorn 1467095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1468680d794bSakpm@linux-foundation.org } 146971fe804bSLee Schermerhorn 147071fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 147171fe804bSLee Schermerhorn { 147271fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 147371fe804bSLee Schermerhorn if (sbinfo->mpol) { 1474bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 147571fe804bSLee Schermerhorn mpol = sbinfo->mpol; 147671fe804bSLee Schermerhorn mpol_get(mpol); 1477bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 147871fe804bSLee Schermerhorn } 147971fe804bSLee Schermerhorn return mpol; 148071fe804bSLee Schermerhorn } 148175edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 148275edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 148375edd345SHugh Dickins { 148475edd345SHugh Dickins } 148575edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 148675edd345SHugh Dickins { 148775edd345SHugh Dickins return NULL; 148875edd345SHugh Dickins } 148975edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 149075edd345SHugh Dickins #ifndef CONFIG_NUMA 149175edd345SHugh Dickins #define vm_policy vm_private_data 149275edd345SHugh Dickins #endif 1493680d794bSakpm@linux-foundation.org 1494800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1495800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1496800d8c63SKirill A. Shutemov { 1497800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 14982c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1499800d8c63SKirill A. 
Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1500800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1501800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1502800d8c63SKirill A. Shutemov } 1503800d8c63SKirill A. Shutemov 1504800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1505800d8c63SKirill A. Shutemov { 1506800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1507800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1508800d8c63SKirill A. Shutemov } 1509800d8c63SKirill A. Shutemov 15105739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, 151141ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 15121da177e4SLinus Torvalds { 15131da177e4SLinus Torvalds struct vm_area_struct pvma; 151418a2f371SMel Gorman struct page *page; 15158c63ca5bSWill Deacon struct vm_fault vmf = { 15168c63ca5bSWill Deacon .vma = &pvma, 15178c63ca5bSWill Deacon }; 15181da177e4SLinus Torvalds 1519800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1520e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1521800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 152218a2f371SMel Gorman 15235739a81cSMatthew Wilcox (Oracle) if (!page) 15245739a81cSMatthew Wilcox (Oracle) return NULL; 15255739a81cSMatthew Wilcox (Oracle) return page_folio(page); 1526800d8c63SKirill A. Shutemov } 152718a2f371SMel Gorman 152878cc8cdcSRik van Riel /* 152978cc8cdcSRik van Riel * Make sure huge_gfp is always more limited than limit_gfp. 153078cc8cdcSRik van Riel * Some of the flags set permissions, while others set limitations. 153178cc8cdcSRik van Riel */ 153278cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 153378cc8cdcSRik van Riel { 153478cc8cdcSRik van Riel gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 153578cc8cdcSRik van Riel gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1536187df5ddSRik van Riel gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1537187df5ddSRik van Riel gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1538187df5ddSRik van Riel 1539187df5ddSRik van Riel /* Allow allocations only from the originally specified zones. */ 1540187df5ddSRik van Riel result |= zoneflags; 154178cc8cdcSRik van Riel 154278cc8cdcSRik van Riel /* 154378cc8cdcSRik van Riel * Minimize the result gfp by taking the union with the deny flags, 154478cc8cdcSRik van Riel * and the intersection of the allow flags. 154578cc8cdcSRik van Riel */ 154678cc8cdcSRik van Riel result |= (limit_gfp & denyflags); 154778cc8cdcSRik van Riel result |= (huge_gfp & limit_gfp) & allowflags; 154878cc8cdcSRik van Riel 154978cc8cdcSRik van Riel return result; 155078cc8cdcSRik van Riel } 155178cc8cdcSRik van Riel 155272827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp, 1553800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1554800d8c63SKirill A. Shutemov { 1555800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 15567b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 15577b8d046fSMatthew Wilcox pgoff_t hindex; 1558dfe98499SMatthew Wilcox (Oracle) struct folio *folio; 1559800d8c63SKirill A. 
Shutemov 15604620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 15617b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 15627b8d046fSMatthew Wilcox XA_PRESENT)) 1563800d8c63SKirill A. Shutemov return NULL; 1564800d8c63SKirill A. Shutemov 1565800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1566dfe98499SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); 1567800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1568dfe98499SMatthew Wilcox (Oracle) if (!folio) 1569dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK); 157072827e5cSMatthew Wilcox (Oracle) return folio; 157118a2f371SMel Gorman } 157218a2f371SMel Gorman 15730c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp, 157418a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 157518a2f371SMel Gorman { 157618a2f371SMel Gorman struct vm_area_struct pvma; 15770c023ef5SMatthew Wilcox (Oracle) struct folio *folio; 157818a2f371SMel Gorman 1579800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 15800c023ef5SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); 1581800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 158218a2f371SMel Gorman 15830c023ef5SMatthew Wilcox (Oracle) return folio; 158418a2f371SMel Gorman } 158518a2f371SMel Gorman 1586b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, 1587800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1588800d8c63SKirill A. Shutemov { 15890f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 159072827e5cSMatthew Wilcox (Oracle) struct folio *folio; 1591800d8c63SKirill A. Shutemov int nr; 1592800d8c63SKirill A. Shutemov int err = -ENOSPC; 1593800d8c63SKirill A. Shutemov 1594396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1595800d8c63SKirill A. Shutemov huge = false; 1596800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1597800d8c63SKirill A. Shutemov 15980f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1599800d8c63SKirill A. Shutemov goto failed; 1600800d8c63SKirill A. Shutemov 1601800d8c63SKirill A. Shutemov if (huge) 160272827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_hugefolio(gfp, info, index); 1603800d8c63SKirill A. Shutemov else 160472827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, index); 160572827e5cSMatthew Wilcox (Oracle) if (folio) { 160672827e5cSMatthew Wilcox (Oracle) __folio_set_locked(folio); 160772827e5cSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 1608b1d0ec3aSMatthew Wilcox (Oracle) return folio; 160975edd345SHugh Dickins } 161018a2f371SMel Gorman 1611800d8c63SKirill A. Shutemov err = -ENOMEM; 16120f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1613800d8c63SKirill A. Shutemov failed: 1614800d8c63SKirill A. Shutemov return ERR_PTR(err); 16151da177e4SLinus Torvalds } 161671fe804bSLee Schermerhorn 16171da177e4SLinus Torvalds /* 1618bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1619fc26babbSMatthew Wilcox (Oracle) * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of 1620bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1621bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. 
If that mapping has special 1622bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1623bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1624bde05d1cSHugh Dickins * 1625bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1626bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1627bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 1628bde05d1cSHugh Dickins */ 1629069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) 1630bde05d1cSHugh Dickins { 1631069d849cSMatthew Wilcox (Oracle) return folio_zonenum(folio) > gfp_zone(gfp); 1632bde05d1cSHugh Dickins } 1633bde05d1cSHugh Dickins 16340d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, 1635bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1636bde05d1cSHugh Dickins { 1637d21bba2bSMatthew Wilcox (Oracle) struct folio *old, *new; 1638bde05d1cSHugh Dickins struct address_space *swap_mapping; 1639c1cb20d4SYu Zhao swp_entry_t entry; 1640bde05d1cSHugh Dickins pgoff_t swap_index; 1641bde05d1cSHugh Dickins int error; 1642bde05d1cSHugh Dickins 16430d698e25SMatthew Wilcox (Oracle) old = *foliop; 1644907ea17eSMatthew Wilcox (Oracle) entry = folio_swap_entry(old); 1645c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1646907ea17eSMatthew Wilcox (Oracle) swap_mapping = swap_address_space(entry); 1647bde05d1cSHugh Dickins 1648bde05d1cSHugh Dickins /* 1649bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1650bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1651bde05d1cSHugh Dickins */ 1652bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1653907ea17eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(old), old); 1654907ea17eSMatthew Wilcox (Oracle) new = shmem_alloc_folio(gfp, info, index); 1655907ea17eSMatthew Wilcox (Oracle) if (!new) 1656bde05d1cSHugh Dickins return -ENOMEM; 1657bde05d1cSHugh Dickins 1658907ea17eSMatthew Wilcox (Oracle) folio_get(new); 1659907ea17eSMatthew Wilcox (Oracle) folio_copy(new, old); 1660907ea17eSMatthew Wilcox (Oracle) flush_dcache_folio(new); 1661bde05d1cSHugh Dickins 1662907ea17eSMatthew Wilcox (Oracle) __folio_set_locked(new); 1663907ea17eSMatthew Wilcox (Oracle) __folio_set_swapbacked(new); 1664907ea17eSMatthew Wilcox (Oracle) folio_mark_uptodate(new); 1665907ea17eSMatthew Wilcox (Oracle) folio_set_swap_entry(new, entry); 1666907ea17eSMatthew Wilcox (Oracle) folio_set_swapcache(new); 1667bde05d1cSHugh Dickins 1668bde05d1cSHugh Dickins /* 1669bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1670bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 
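 * The exchange is done under xa_lock of the swap cache, with the
 * memcg charge and the NR_FILE_PAGES/NR_SHMEM counters moved across
 * in the same critical section, so concurrent lookups see either the
 * old folio or the new one, never neither.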
1671bde05d1cSHugh Dickins */ 1672b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 1673907ea17eSMatthew Wilcox (Oracle) error = shmem_replace_entry(swap_mapping, swap_index, old, new); 16740142ef6cSHugh Dickins if (!error) { 1675d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(old, new); 1676907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1677907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1678907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); 1679907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_SHMEM, -1); 16800142ef6cSHugh Dickins } 1681b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1682bde05d1cSHugh Dickins 16830142ef6cSHugh Dickins if (unlikely(error)) { 16840142ef6cSHugh Dickins /* 16850142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 16860142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 16870142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 16880142ef6cSHugh Dickins */ 1689907ea17eSMatthew Wilcox (Oracle) old = new; 16900142ef6cSHugh Dickins } else { 1691907ea17eSMatthew Wilcox (Oracle) folio_add_lru(new); 16920d698e25SMatthew Wilcox (Oracle) *foliop = new; 16930142ef6cSHugh Dickins } 1694bde05d1cSHugh Dickins 1695907ea17eSMatthew Wilcox (Oracle) folio_clear_swapcache(old); 1696907ea17eSMatthew Wilcox (Oracle) old->private = NULL; 1697bde05d1cSHugh Dickins 1698907ea17eSMatthew Wilcox (Oracle) folio_unlock(old); 1699907ea17eSMatthew Wilcox (Oracle) folio_put_refs(old, 2); 17000142ef6cSHugh Dickins return error; 1701bde05d1cSHugh Dickins } 1702bde05d1cSHugh Dickins 17036cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, 17046cec2b95SMiaohe Lin struct folio *folio, swp_entry_t swap) 17056cec2b95SMiaohe Lin { 17066cec2b95SMiaohe Lin struct address_space *mapping = inode->i_mapping; 17076cec2b95SMiaohe Lin struct shmem_inode_info *info = SHMEM_I(inode); 17086cec2b95SMiaohe Lin swp_entry_t swapin_error; 17096cec2b95SMiaohe Lin void *old; 17106cec2b95SMiaohe Lin 171115520a3fSPeter Xu swapin_error = make_swapin_error_entry(); 17126cec2b95SMiaohe Lin old = xa_cmpxchg_irq(&mapping->i_pages, index, 17136cec2b95SMiaohe Lin swp_to_radix_entry(swap), 17146cec2b95SMiaohe Lin swp_to_radix_entry(swapin_error), 0); 17156cec2b95SMiaohe Lin if (old != swp_to_radix_entry(swap)) 17166cec2b95SMiaohe Lin return; 17176cec2b95SMiaohe Lin 17186cec2b95SMiaohe Lin folio_wait_writeback(folio); 171975fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 17206cec2b95SMiaohe Lin spin_lock_irq(&info->lock); 17216cec2b95SMiaohe Lin /* 17226cec2b95SMiaohe Lin * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't 17236cec2b95SMiaohe Lin * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in 17246cec2b95SMiaohe Lin * shmem_evict_inode. 17256cec2b95SMiaohe Lin */ 17266cec2b95SMiaohe Lin info->alloced--; 17276cec2b95SMiaohe Lin info->swapped--; 17286cec2b95SMiaohe Lin shmem_recalc_inode(inode); 17296cec2b95SMiaohe Lin spin_unlock_irq(&info->lock); 17306cec2b95SMiaohe Lin swap_free(swap); 17316cec2b95SMiaohe Lin } 17326cec2b95SMiaohe Lin 1733bde05d1cSHugh Dickins /* 1734833de10fSMiaohe Lin * Swap in the folio pointed to by *foliop. 1735833de10fSMiaohe Lin * Caller has to make sure that *foliop contains a valid swapped folio. 1736833de10fSMiaohe Lin * Returns 0 and the folio in foliop if success. 
On failure, returns the 1737833de10fSMiaohe Lin * error code and NULL in *foliop. 17381da177e4SLinus Torvalds */ 1739da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 1740da08e9b7SMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, 1741c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 17422b740303SSouptick Joarder vm_fault_t *fault_type) 17431da177e4SLinus Torvalds { 17441da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 174523f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 174604f94e3fSDan Schatzberg struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; 1747cbc2bd98SKairui Song struct swap_info_struct *si; 1748da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = NULL; 17491da177e4SLinus Torvalds swp_entry_t swap; 17501da177e4SLinus Torvalds int error; 17511da177e4SLinus Torvalds 1752da08e9b7SMatthew Wilcox (Oracle) VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 1753da08e9b7SMatthew Wilcox (Oracle) swap = radix_to_swp_entry(*foliop); 1754da08e9b7SMatthew Wilcox (Oracle) *foliop = NULL; 175554af6042SHugh Dickins 17566cec2b95SMiaohe Lin if (is_swapin_error_entry(swap)) 17576cec2b95SMiaohe Lin return -EIO; 17586cec2b95SMiaohe Lin 1759cbc2bd98SKairui Song si = get_swap_device(swap); 1760cbc2bd98SKairui Song if (!si) { 1761cbc2bd98SKairui Song if (!shmem_confirm_swap(mapping, index, swap)) 1762cbc2bd98SKairui Song return -EEXIST; 1763cbc2bd98SKairui Song else 1764cbc2bd98SKairui Song return -EINVAL; 1765cbc2bd98SKairui Song } 1766cbc2bd98SKairui Song 17671da177e4SLinus Torvalds /* Look it up and read it in.. */ 17685739a81cSMatthew Wilcox (Oracle) folio = swap_cache_get_folio(swap, NULL, 0); 17695739a81cSMatthew Wilcox (Oracle) if (!folio) { 17709e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 17719e18eb29SAndres Lagar-Cavilla if (fault_type) { 177268da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 17739e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 17742262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 17759e18eb29SAndres Lagar-Cavilla } 17769e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 17775739a81cSMatthew Wilcox (Oracle) folio = shmem_swapin(swap, gfp, info, index); 17785739a81cSMatthew Wilcox (Oracle) if (!folio) { 17791da177e4SLinus Torvalds error = -ENOMEM; 178054af6042SHugh Dickins goto failed; 1781285b2c4fSHugh Dickins } 17821da177e4SLinus Torvalds } 17831da177e4SLinus Torvalds 1784833de10fSMiaohe Lin /* We have to do this with folio locked to prevent races */ 1785da08e9b7SMatthew Wilcox (Oracle) folio_lock(folio); 1786da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_swapcache(folio) || 1787da08e9b7SMatthew Wilcox (Oracle) folio_swap_entry(folio).val != swap.val || 1788d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1789c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1790d1899228SHugh Dickins goto unlock; 1791bde05d1cSHugh Dickins } 1792da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 17931da177e4SLinus Torvalds error = -EIO; 179454af6042SHugh Dickins goto failed; 179554af6042SHugh Dickins } 1796da08e9b7SMatthew Wilcox (Oracle) folio_wait_writeback(folio); 179754af6042SHugh Dickins 17988a84802eSSteven Price /* 17998a84802eSSteven Price * Some architectures may have to restore extra metadata to the 1800da08e9b7SMatthew Wilcox (Oracle) * folio after reading from swap. 
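 * (For instance, arm64 MTE tags are saved aside when a page goes out
 * to swap and must be put back by arch_swap_restore() before the
 * folio can be mapped again; architectures without such metadata
 * make this a no-op.)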
18018a84802eSSteven Price */ 1802da08e9b7SMatthew Wilcox (Oracle) arch_swap_restore(swap, folio); 18038a84802eSSteven Price 1804069d849cSMatthew Wilcox (Oracle) if (shmem_should_replace_folio(folio, gfp)) { 18050d698e25SMatthew Wilcox (Oracle) error = shmem_replace_folio(&folio, gfp, info, index); 1806bde05d1cSHugh Dickins if (error) 180754af6042SHugh Dickins goto failed; 18081da177e4SLinus Torvalds } 18091da177e4SLinus Torvalds 1810b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, index, 18113fea5a49SJohannes Weiner swp_to_radix_entry(swap), gfp, 18123fea5a49SJohannes Weiner charge_mm); 181354af6042SHugh Dickins if (error) 181454af6042SHugh Dickins goto failed; 181554af6042SHugh Dickins 18164595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 181754af6042SHugh Dickins info->swapped--; 181854af6042SHugh Dickins shmem_recalc_inode(inode); 18194595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 182027ab7006SHugh Dickins 182166d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1822da08e9b7SMatthew Wilcox (Oracle) folio_mark_accessed(folio); 182366d2f4d2SHugh Dickins 182475fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 1825da08e9b7SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 182627ab7006SHugh Dickins swap_free(swap); 1827cbc2bd98SKairui Song put_swap_device(si); 182827ab7006SHugh Dickins 1829da08e9b7SMatthew Wilcox (Oracle) *foliop = folio; 1830c5bf121eSVineeth Remanan Pillai return 0; 1831c5bf121eSVineeth Remanan Pillai failed: 1832c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap)) 1833c5bf121eSVineeth Remanan Pillai error = -EEXIST; 18346cec2b95SMiaohe Lin if (error == -EIO) 18356cec2b95SMiaohe Lin shmem_set_folio_swapin_error(inode, index, folio, swap); 1836c5bf121eSVineeth Remanan Pillai unlock: 1837da08e9b7SMatthew Wilcox (Oracle) if (folio) { 1838da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1839da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1840c5bf121eSVineeth Remanan Pillai } 1841cbc2bd98SKairui Song put_swap_device(si); 1842c5bf121eSVineeth Remanan Pillai 1843c5bf121eSVineeth Remanan Pillai return error; 1844c5bf121eSVineeth Remanan Pillai } 1845c5bf121eSVineeth Remanan Pillai 1846c5bf121eSVineeth Remanan Pillai /* 1847fc26babbSMatthew Wilcox (Oracle) * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate 1848c5bf121eSVineeth Remanan Pillai * 1849c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the 1850c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap 1851c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache. 1852c5bf121eSVineeth Remanan Pillai * 1853c949b097SAxel Rasmussen * vma, vmf, and fault_type are only supplied by shmem_fault: 1854c5bf121eSVineeth Remanan Pillai * otherwise they are NULL. 
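 * A typical non-fault caller goes through shmem_get_folio() just
 * below; as a minimal sketch:
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *
 *	if (!err && folio) {
 *		... use the locked, referenced folio ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *
 * SGP_CACHE brings the folio uptodate; SGP_READ may instead return
 * success with a NULL folio over a hole, letting the caller zero.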
1855c5bf121eSVineeth Remanan Pillai */ 1856fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, 1857fc26babbSMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 1858c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1859c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1860c5bf121eSVineeth Remanan Pillai { 1861c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1862c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1863c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1864c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1865b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio; 18666fe7d712SLukas Bulwahn pgoff_t hindex; 1867164cc4feSRik van Riel gfp_t huge_gfp; 1868c5bf121eSVineeth Remanan Pillai int error; 1869c5bf121eSVineeth Remanan Pillai int once = 0; 1870c5bf121eSVineeth Remanan Pillai int alloced = 0; 1871c5bf121eSVineeth Remanan Pillai 1872c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1873c5bf121eSVineeth Remanan Pillai return -EFBIG; 1874c5bf121eSVineeth Remanan Pillai repeat: 1875c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1876c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1877c5bf121eSVineeth Remanan Pillai return -EINVAL; 1878c5bf121eSVineeth Remanan Pillai } 1879c5bf121eSVineeth Remanan Pillai 1880c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 188104f94e3fSDan Schatzberg charge_mm = vma ? vma->vm_mm : NULL; 1882c5bf121eSVineeth Remanan Pillai 1883b1d0ec3aSMatthew Wilcox (Oracle) folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0); 1884b1d0ec3aSMatthew Wilcox (Oracle) if (folio && vma && userfaultfd_minor(vma)) { 1885b1d0ec3aSMatthew Wilcox (Oracle) if (!xa_is_value(folio)) { 1886b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 1887b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 1888c949b097SAxel Rasmussen } 1889c949b097SAxel Rasmussen *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 1890c949b097SAxel Rasmussen return 0; 1891c949b097SAxel Rasmussen } 1892c949b097SAxel Rasmussen 1893b1d0ec3aSMatthew Wilcox (Oracle) if (xa_is_value(folio)) { 1894da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, index, &folio, 1895c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1896c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1897c5bf121eSVineeth Remanan Pillai goto repeat; 1898c5bf121eSVineeth Remanan Pillai 1899fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 1900c5bf121eSVineeth Remanan Pillai return error; 1901c5bf121eSVineeth Remanan Pillai } 1902c5bf121eSVineeth Remanan Pillai 1903b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 1904acdd9f8eSHugh Dickins if (sgp == SGP_WRITE) 1905b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 1906b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 1907acdd9f8eSHugh Dickins goto out; 1908fc26babbSMatthew Wilcox (Oracle) /* fallocated folio */ 1909c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 1910c5bf121eSVineeth Remanan Pillai goto clear; 1911b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 1912b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 1913c5bf121eSVineeth Remanan Pillai } 1914c5bf121eSVineeth Remanan Pillai 1915c5bf121eSVineeth Remanan Pillai /* 1916fc26babbSMatthew Wilcox (Oracle) * SGP_READ: succeed on hole, with NULL folio, letting caller zero. 
1917fc26babbSMatthew Wilcox (Oracle) * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 1918acdd9f8eSHugh Dickins */ 1919fc26babbSMatthew Wilcox (Oracle) *foliop = NULL; 1920acdd9f8eSHugh Dickins if (sgp == SGP_READ) 1921acdd9f8eSHugh Dickins return 0; 1922acdd9f8eSHugh Dickins if (sgp == SGP_NOALLOC) 1923acdd9f8eSHugh Dickins return -ENOENT; 1924acdd9f8eSHugh Dickins 1925acdd9f8eSHugh Dickins /* 1926acdd9f8eSHugh Dickins * Fast cache lookup and swap lookup did not find it: allocate. 1927c5bf121eSVineeth Remanan Pillai */ 1928c5bf121eSVineeth Remanan Pillai 1929cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1930cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1931cfda0526SMike Rapoport return 0; 1932cfda0526SMike Rapoport } 1933cfda0526SMike Rapoport 19342cf13384SDavid Stevens if (!shmem_is_huge(inode, index, false, 19352cf13384SDavid Stevens vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) 1936800d8c63SKirill A. Shutemov goto alloc_nohuge; 193727d80fa2SKees Cook 1938164cc4feSRik van Riel huge_gfp = vma_thp_gfp_mask(vma); 193978cc8cdcSRik van Riel huge_gfp = limit_gfp_mask(huge_gfp, gfp); 1940b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); 1941b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 1942c5bf121eSVineeth Remanan Pillai alloc_nohuge: 1943b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); 194454af6042SHugh Dickins } 1945b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 1946779750d2SKirill A. Shutemov int retry = 5; 1947c5bf121eSVineeth Remanan Pillai 1948b1d0ec3aSMatthew Wilcox (Oracle) error = PTR_ERR(folio); 1949b1d0ec3aSMatthew Wilcox (Oracle) folio = NULL; 1950779750d2SKirill A. Shutemov if (error != -ENOSPC) 1951c5bf121eSVineeth Remanan Pillai goto unlock; 1952779750d2SKirill A. Shutemov /* 1953fc26babbSMatthew Wilcox (Oracle) * Try to reclaim some space by splitting a large folio 1954779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1955779750d2SKirill A. Shutemov */ 1956779750d2SKirill A. Shutemov while (retry--) { 1957779750d2SKirill A. Shutemov int ret; 1958c5bf121eSVineeth Remanan Pillai 1959779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1960779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1961779750d2SKirill A. Shutemov break; 1962779750d2SKirill A. Shutemov if (ret) 1963779750d2SKirill A. Shutemov goto alloc_nohuge; 1964779750d2SKirill A. Shutemov } 1965c5bf121eSVineeth Remanan Pillai goto unlock; 1966800d8c63SKirill A. Shutemov } 1967800d8c63SKirill A. Shutemov 1968b1d0ec3aSMatthew Wilcox (Oracle) hindex = round_down(index, folio_nr_pages(folio)); 1969800d8c63SKirill A. Shutemov 197066d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1971b1d0ec3aSMatthew Wilcox (Oracle) __folio_set_referenced(folio); 197266d2f4d2SHugh Dickins 1973b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, hindex, 19743fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK, 19753fea5a49SJohannes Weiner charge_mm); 19763fea5a49SJohannes Weiner if (error) 1977800d8c63SKirill A. Shutemov goto unacct; 1978b1d0ec3aSMatthew Wilcox (Oracle) folio_add_lru(folio); 197954af6042SHugh Dickins 19804595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 1981b1d0ec3aSMatthew Wilcox (Oracle) info->alloced += folio_nr_pages(folio); 1982fa020a2bSAndrew Morton inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio); 198354af6042SHugh Dickins shmem_recalc_inode(inode); 19844595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 19851635f6a7SHugh Dickins alloced = true; 198654af6042SHugh Dickins 1987b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_pmd_mappable(folio) && 1988779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1989fc26babbSMatthew Wilcox (Oracle) folio_next_index(folio) - 1) { 1990779750d2SKirill A. Shutemov /* 1991fc26babbSMatthew Wilcox (Oracle) * Part of the large folio is beyond i_size: subject 1992779750d2SKirill A. Shutemov * to shrink under memory pressure. 1993779750d2SKirill A. Shutemov */ 1994779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1995d041353dSCong Wang /* 1996d041353dSCong Wang * _careful to defend against unlocked access to 1997d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1998d041353dSCong Wang */ 1999d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 2000779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 2001779750d2SKirill A. Shutemov &sbinfo->shrinklist); 2002779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 2003779750d2SKirill A. Shutemov } 2004779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 2005779750d2SKirill A. Shutemov } 2006779750d2SKirill A. Shutemov 2007ec9516fbSHugh Dickins /* 2008fc26babbSMatthew Wilcox (Oracle) * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 20091635f6a7SHugh Dickins */ 20101635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 20111635f6a7SHugh Dickins sgp = SGP_WRITE; 20121635f6a7SHugh Dickins clear: 20131635f6a7SHugh Dickins /* 2014fc26babbSMatthew Wilcox (Oracle) * Let SGP_WRITE caller clear ends if write does not fill folio; 2015fc26babbSMatthew Wilcox (Oracle) * but SGP_FALLOC on a folio fallocated earlier must initialize 20161635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 2017ec9516fbSHugh Dickins */ 2018b1d0ec3aSMatthew Wilcox (Oracle) if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { 2019b1d0ec3aSMatthew Wilcox (Oracle) long i, n = folio_nr_pages(folio); 2020800d8c63SKirill A. Shutemov 2021b1d0ec3aSMatthew Wilcox (Oracle) for (i = 0; i < n; i++) 2022b1d0ec3aSMatthew Wilcox (Oracle) clear_highpage(folio_page(folio, i)); 2023b1d0ec3aSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 2024b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 2025ec9516fbSHugh Dickins } 2026bde05d1cSHugh Dickins 202754af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 202875edd345SHugh Dickins if (sgp <= SGP_CACHE && 202909cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2030267a4c76SHugh Dickins if (alloced) { 2031b1d0ec3aSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 2032b1d0ec3aSMatthew Wilcox (Oracle) filemap_remove_folio(folio); 20334595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 2034267a4c76SHugh Dickins shmem_recalc_inode(inode); 20354595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 2036267a4c76SHugh Dickins } 203754af6042SHugh Dickins error = -EINVAL; 2038267a4c76SHugh Dickins goto unlock; 2039ff36b801SShaohua Li } 204063ec1973SMatthew Wilcox (Oracle) out: 2041fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 204254af6042SHugh Dickins return 0; 2043d00806b1SNick Piggin 2044d0217ac0SNick Piggin /* 204554af6042SHugh Dickins * Error recovery. 20461da177e4SLinus Torvalds */ 204754af6042SHugh Dickins unacct: 2048b1d0ec3aSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); 2049800d8c63SKirill A. Shutemov 2050b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 2051b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2052b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 2053800d8c63SKirill A. Shutemov goto alloc_nohuge; 2054800d8c63SKirill A. Shutemov } 2055d1899228SHugh Dickins unlock: 2056b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 2057b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2058b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 205954af6042SHugh Dickins } 206054af6042SHugh Dickins if (error == -ENOSPC && !once++) { 20614595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 206254af6042SHugh Dickins shmem_recalc_inode(inode); 20634595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 20641da177e4SLinus Torvalds goto repeat; 2065d8dc74f2SAdrian Bunk } 20667f4446eeSMatthew Wilcox if (error == -EEXIST) 206754af6042SHugh Dickins goto repeat; 206854af6042SHugh Dickins return error; 20691da177e4SLinus Torvalds } 20701da177e4SLinus Torvalds 20714e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, 20724e1fc793SMatthew Wilcox (Oracle) enum sgp_type sgp) 20734e1fc793SMatthew Wilcox (Oracle) { 20744e1fc793SMatthew Wilcox (Oracle) return shmem_get_folio_gfp(inode, index, foliop, sgp, 20754e1fc793SMatthew Wilcox (Oracle) mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); 20764e1fc793SMatthew Wilcox (Oracle) } 20774e1fc793SMatthew Wilcox (Oracle) 207810d20bd2SLinus Torvalds /* 207910d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 208010d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 208110d20bd2SLinus Torvalds * target. 208210d20bd2SLinus Torvalds */ 2083ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 208410d20bd2SLinus Torvalds { 208510d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 20862055da97SIngo Molnar list_del_init(&wait->entry); 208710d20bd2SLinus Torvalds return ret; 208810d20bd2SLinus Torvalds } 208910d20bd2SLinus Torvalds 209020acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 20911da177e4SLinus Torvalds { 209211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 2093496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 20949e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 209568a54100SMatthew Wilcox (Oracle) struct folio *folio = NULL; 209620acce67SSouptick Joarder int err; 209720acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 20981da177e4SLinus Torvalds 2099f00cdc6dSHugh Dickins /* 2100f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2101f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 21029608703eSJan Kara * locks writers out with its hold on i_rwsem. 
So refrain from 21038e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 21048e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 21058e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 21068e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 21078e205f77SHugh Dickins * 21088e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 21098e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 21108e205f77SHugh Dickins * we just need to make racing faults a rare case. 21118e205f77SHugh Dickins * 21128e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 21139608703eSJan Kara * standard mutex or completion: but we cannot take i_rwsem in fault, 21148e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 2115f00cdc6dSHugh Dickins */ 2116f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2117f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2118f00cdc6dSHugh Dickins 2119f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2120f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 21218e205f77SHugh Dickins if (shmem_falloc && 21228e205f77SHugh Dickins shmem_falloc->waitq && 21238e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 21248e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 21258897c1b1SKirill A. Shutemov struct file *fpin; 21268e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 212710d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 21288e205f77SHugh Dickins 21298e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 21308897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL); 21318897c1b1SKirill A. Shutemov if (fpin) 21328e205f77SHugh Dickins ret = VM_FAULT_RETRY; 21338e205f77SHugh Dickins 21348e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 21358e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 21368e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 21378e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21388e205f77SHugh Dickins schedule(); 21398e205f77SHugh Dickins 21408e205f77SHugh Dickins /* 21418e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 21428e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 21438e205f77SHugh Dickins * is usually invalid by the time we reach here, but 21448e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 21458e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 21468e205f77SHugh Dickins */ 21478e205f77SHugh Dickins spin_lock(&inode->i_lock); 21488e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 21498e205f77SHugh Dickins spin_unlock(&inode->i_lock); 21508897c1b1SKirill A. Shutemov 21518897c1b1SKirill A. Shutemov if (fpin) 21528897c1b1SKirill A. 
Shutemov fput(fpin); 21538e205f77SHugh Dickins return ret; 2154f00cdc6dSHugh Dickins } 21558e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2156f00cdc6dSHugh Dickins } 2157f00cdc6dSHugh Dickins 215868a54100SMatthew Wilcox (Oracle) err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, 2159cfda0526SMike Rapoport gfp, vma, vmf, &ret); 216020acce67SSouptick Joarder if (err) 216120acce67SSouptick Joarder return vmf_error(err); 216268a54100SMatthew Wilcox (Oracle) if (folio) 216368a54100SMatthew Wilcox (Oracle) vmf->page = folio_file_page(folio, vmf->pgoff); 216468da9f05SHugh Dickins return ret; 21651da177e4SLinus Torvalds } 21661da177e4SLinus Torvalds 2167c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2168c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2169c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2170c01d5b30SHugh Dickins { 2171c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2172c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2173c01d5b30SHugh Dickins unsigned long addr; 2174c01d5b30SHugh Dickins unsigned long offset; 2175c01d5b30SHugh Dickins unsigned long inflated_len; 2176c01d5b30SHugh Dickins unsigned long inflated_addr; 2177c01d5b30SHugh Dickins unsigned long inflated_offset; 2178c01d5b30SHugh Dickins 2179c01d5b30SHugh Dickins if (len > TASK_SIZE) 2180c01d5b30SHugh Dickins return -ENOMEM; 2181c01d5b30SHugh Dickins 2182c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2183c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2184c01d5b30SHugh Dickins 2185396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2186c01d5b30SHugh Dickins return addr; 2187c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2188c01d5b30SHugh Dickins return addr; 2189c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2190c01d5b30SHugh Dickins return addr; 2191c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2192c01d5b30SHugh Dickins return addr; 2193c01d5b30SHugh Dickins 2194c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2195c01d5b30SHugh Dickins return addr; 2196c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2197c01d5b30SHugh Dickins return addr; 2198c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2199c01d5b30SHugh Dickins return addr; 2200c01d5b30SHugh Dickins /* 2201c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2202c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 220399158997SKirill A. Shutemov * But if caller specified an address hint and we allocated area there 220499158997SKirill A. Shutemov * successfully, respect that as before. 2205c01d5b30SHugh Dickins */ 220699158997SKirill A. Shutemov if (uaddr == addr) 2207c01d5b30SHugh Dickins return addr; 2208c01d5b30SHugh Dickins 2209c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2210c01d5b30SHugh Dickins struct super_block *sb; 2211c01d5b30SHugh Dickins 2212c01d5b30SHugh Dickins if (file) { 2213c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2214c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2215c01d5b30SHugh Dickins } else { 2216c01d5b30SHugh Dickins /* 2217c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2218c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
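 * (Editorial sketch: a mapping such as
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0)
 * arrives here with file == NULL, and ends up backed by an
 * object on shm_mnt.)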
2219c01d5b30SHugh Dickins */ 2220c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2221c01d5b30SHugh Dickins return addr; 2222c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2223c01d5b30SHugh Dickins } 22243089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2225c01d5b30SHugh Dickins return addr; 2226c01d5b30SHugh Dickins } 2227c01d5b30SHugh Dickins 2228c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2229c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2230c01d5b30SHugh Dickins return addr; 2231c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2232c01d5b30SHugh Dickins return addr; 2233c01d5b30SHugh Dickins 2234c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2235c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2236c01d5b30SHugh Dickins return addr; 2237c01d5b30SHugh Dickins if (inflated_len < len) 2238c01d5b30SHugh Dickins return addr; 2239c01d5b30SHugh Dickins 224099158997SKirill A. Shutemov inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2241c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2242c01d5b30SHugh Dickins return addr; 2243c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2244c01d5b30SHugh Dickins return addr; 2245c01d5b30SHugh Dickins 2246c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2247c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2248c01d5b30SHugh Dickins if (inflated_offset > offset) 2249c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2250c01d5b30SHugh Dickins 2251c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2252c01d5b30SHugh Dickins return addr; 2253c01d5b30SHugh Dickins return inflated_addr; 2254c01d5b30SHugh Dickins } 2255c01d5b30SHugh Dickins 22561da177e4SLinus Torvalds #ifdef CONFIG_NUMA 225741ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 22581da177e4SLinus Torvalds { 2259496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 226041ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 22611da177e4SLinus Torvalds } 22621da177e4SLinus Torvalds 2263d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2264d8dc74f2SAdrian Bunk unsigned long addr) 22651da177e4SLinus Torvalds { 2266496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 226741ffe5d5SHugh Dickins pgoff_t index; 22681da177e4SLinus Torvalds 226941ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 227041ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 22711da177e4SLinus Torvalds } 22721da177e4SLinus Torvalds #endif 22731da177e4SLinus Torvalds 2274d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 22751da177e4SLinus Torvalds { 2276496ad9aaSAl Viro struct inode *inode = file_inode(file); 22771da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 22781da177e4SLinus Torvalds int retval = -ENOMEM; 22791da177e4SLinus Torvalds 2280ea0dfeb4SHugh Dickins /* 2281ea0dfeb4SHugh Dickins * What serializes the accesses to info->flags? 2282ea0dfeb4SHugh Dickins * ipc_lock_object() when called from shmctl_do_lock(), 2283ea0dfeb4SHugh Dickins * no serialization needed when called from shm_destroy(). 
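 * (For reference, the SysV chain that holds the ipc lock is, in
 * outline and with a hypothetical id:
 *	shmctl(id, SHM_LOCK, NULL)
 *	  -> shmctl_do_lock() -> shmem_lock(file, 1, ucounts);
 * SHM_LOCK ignores the third shmctl() argument.)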
2284ea0dfeb4SHugh Dickins */ 22851da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 2286d7c9e99aSAlexey Gladkov if (!user_shm_lock(inode->i_size, ucounts)) 22871da177e4SLinus Torvalds goto out_nomem; 22881da177e4SLinus Torvalds info->flags |= VM_LOCKED; 228989e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 22901da177e4SLinus Torvalds } 2291d7c9e99aSAlexey Gladkov if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2292d7c9e99aSAlexey Gladkov user_shm_unlock(inode->i_size, ucounts); 22931da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 229489e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 22951da177e4SLinus Torvalds } 22961da177e4SLinus Torvalds retval = 0; 229789e004eaSLee Schermerhorn 22981da177e4SLinus Torvalds out_nomem: 22991da177e4SLinus Torvalds return retval; 23001da177e4SLinus Torvalds } 23011da177e4SLinus Torvalds 23029b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 23031da177e4SLinus Torvalds { 2304d09e8ca6SPasha Tatashin struct inode *inode = file_inode(file); 2305d09e8ca6SPasha Tatashin struct shmem_inode_info *info = SHMEM_I(inode); 230622247efdSPeter Xu int ret; 2307ab3948f5SJoel Fernandes (Google) 230822247efdSPeter Xu ret = seal_check_future_write(info->seals, vma); 230922247efdSPeter Xu if (ret) 231022247efdSPeter Xu return ret; 2311ab3948f5SJoel Fernandes (Google) 231251b0bff2SCatalin Marinas /* arm64 - allow memory tagging on RAM-based files */ 23131c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MTE_ALLOWED); 231451b0bff2SCatalin Marinas 23151da177e4SLinus Torvalds file_accessed(file); 2316d09e8ca6SPasha Tatashin /* This is anonymous shared memory if it is unlinked at the time of mmap */ 2317d09e8ca6SPasha Tatashin if (inode->i_nlink) 23181da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2319d09e8ca6SPasha Tatashin else 2320d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops; 23211da177e4SLinus Torvalds return 0; 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 2324cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR 2325cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2326cb241339SHugh Dickins 2327cb241339SHugh Dickins /* 2328cb241339SHugh Dickins * chattr's fsflags are unrelated to extended attributes, 2329cb241339SHugh Dickins * but tmpfs has chosen to enable them under the same config option. 2330cb241339SHugh Dickins */ 2331cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2332e408e695STheodore Ts'o { 2333cb241339SHugh Dickins unsigned int i_flags = 0; 2334cb241339SHugh Dickins 2335cb241339SHugh Dickins if (fsflags & FS_NOATIME_FL) 2336cb241339SHugh Dickins i_flags |= S_NOATIME; 2337cb241339SHugh Dickins if (fsflags & FS_APPEND_FL) 2338cb241339SHugh Dickins i_flags |= S_APPEND; 2339cb241339SHugh Dickins if (fsflags & FS_IMMUTABLE_FL) 2340cb241339SHugh Dickins i_flags |= S_IMMUTABLE; 2341cb241339SHugh Dickins /* 2342cb241339SHugh Dickins * But FS_NODUMP_FL does not require any action in i_flags. 
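 * (Illustration: "chattr +a" on a tmpfs file issues
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr)
 * with FS_APPEND_FL set in attr; that reaches shmem_fileattr_set()
 * below and is translated here into S_APPEND on the inode.)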
2343cb241339SHugh Dickins */ 2344cb241339SHugh Dickins inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); 2345e408e695STheodore Ts'o } 2346cb241339SHugh Dickins #else 2347cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2348cb241339SHugh Dickins { 2349cb241339SHugh Dickins } 2350cb241339SHugh Dickins #define shmem_initxattrs NULL 2351cb241339SHugh Dickins #endif 2352e408e695STheodore Ts'o 23537a80e5b8SGiuseppe Scrivano static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, 23547a80e5b8SGiuseppe Scrivano struct inode *dir, umode_t mode, dev_t dev, 23557a80e5b8SGiuseppe Scrivano unsigned long flags) 23561da177e4SLinus Torvalds { 23571da177e4SLinus Torvalds struct inode *inode; 23581da177e4SLinus Torvalds struct shmem_inode_info *info; 23591da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2360e809d5f0SChris Down ino_t ino; 23611da177e4SLinus Torvalds 2362e809d5f0SChris Down if (shmem_reserve_inode(sb, &ino)) 23631da177e4SLinus Torvalds return NULL; 23641da177e4SLinus Torvalds 23651da177e4SLinus Torvalds inode = new_inode(sb); 23661da177e4SLinus Torvalds if (inode) { 2367e809d5f0SChris Down inode->i_ino = ino; 23687a80e5b8SGiuseppe Scrivano inode_init_owner(idmap, inode, dir, mode); 23691da177e4SLinus Torvalds inode->i_blocks = 0; 2370078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2371a251c17aSJason A. Donenfeld inode->i_generation = get_random_u32(); 23721da177e4SLinus Torvalds info = SHMEM_I(inode); 23731da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 23741da177e4SLinus Torvalds spin_lock_init(&info->lock); 2375af53d3e9SHugh Dickins atomic_set(&info->stop_eviction, 0); 237640e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 23770b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2378f7cd16a5SXavier Roche info->i_crtime = inode->i_mtime; 2379e408e695STheodore Ts'o info->fsflags = (dir == NULL) ? 0 : 2380e408e695STheodore Ts'o SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; 2381cb241339SHugh Dickins if (info->fsflags) 2382cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags); 2383779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 23841da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 23852c6efe9cSLuis Chamberlain if (sbinfo->noswap) 23862c6efe9cSLuis Chamberlain mapping_set_unevictable(inode->i_mapping); 238738f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 238872c04902SAl Viro cache_no_acl(inode); 2389ff36da69SMatthew Wilcox (Oracle) mapping_set_large_folios(inode->i_mapping); 23901da177e4SLinus Torvalds 23911da177e4SLinus Torvalds switch (mode & S_IFMT) { 23921da177e4SLinus Torvalds default: 239339f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 23941da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 23951da177e4SLinus Torvalds break; 23961da177e4SLinus Torvalds case S_IFREG: 239714fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 23981da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 23991da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 240071fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 240171fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 24021da177e4SLinus Torvalds break; 24031da177e4SLinus Torvalds case S_IFDIR: 2404d8c76e6fSDave Hansen inc_nlink(inode); 24051da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 24061da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 24071da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 24081da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 24091da177e4SLinus Torvalds break; 24101da177e4SLinus Torvalds case S_IFLNK: 24111da177e4SLinus Torvalds /* 24121da177e4SLinus Torvalds * Must not load anything in the rbtree, 24131da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 24141da177e4SLinus Torvalds */ 241571fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 24161da177e4SLinus Torvalds break; 24171da177e4SLinus Torvalds } 2418b45d71fbSJoel Fernandes (Google) 2419b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode); 24205b04c689SPavel Emelyanov } else 24215b04c689SPavel Emelyanov shmem_free_inode(sb); 24221da177e4SLinus Torvalds return inode; 24231da177e4SLinus Torvalds } 24241da177e4SLinus Torvalds 24253460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD 24263460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 24274c27fe4cSMike Rapoport pmd_t *dst_pmd, 24284c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 24294c27fe4cSMike Rapoport unsigned long dst_addr, 24304c27fe4cSMike Rapoport unsigned long src_addr, 24318ee79edfSPeter Xu bool zeropage, bool wp_copy, 24324c27fe4cSMike Rapoport struct page **pagep) 24334c27fe4cSMike Rapoport { 24344c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 24354c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 24364c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 24374c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 24384c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 24394c27fe4cSMike Rapoport void *page_kaddr; 2440b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio; 24414c27fe4cSMike Rapoport int ret; 24423460f6e5SAxel Rasmussen pgoff_t max_off; 24434c27fe4cSMike Rapoport 24447ed9d238SAxel Rasmussen if (!shmem_inode_acct_block(inode, 1)) { 24457ed9d238SAxel Rasmussen /* 24467ed9d238SAxel Rasmussen * We may have got a page, returned -ENOENT triggering a retry, 24477ed9d238SAxel Rasmussen * and now we find ourselves with -ENOMEM. 
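 * (The userfaultfd caller's loop in outline, names abridged:
 *	while (shmem_mfill_atomic_pte(..., &page) == -ENOENT)
 *		redo the user copy outside mmap_lock, keep page;
 * which is how *pagep can already be non-NULL on entry.)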
Release the page, to 24487ed9d238SAxel Rasmussen * avoid a BUG_ON in our caller. 24497ed9d238SAxel Rasmussen */ 24507ed9d238SAxel Rasmussen if (unlikely(*pagep)) { 24517ed9d238SAxel Rasmussen put_page(*pagep); 24527ed9d238SAxel Rasmussen *pagep = NULL; 24537ed9d238SAxel Rasmussen } 24547d64ae3aSAxel Rasmussen return -ENOMEM; 24557ed9d238SAxel Rasmussen } 24564c27fe4cSMike Rapoport 2457cb658a45SAndrea Arcangeli if (!*pagep) { 24587d64ae3aSAxel Rasmussen ret = -ENOMEM; 24597a7256d5SMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, pgoff); 24607a7256d5SMatthew Wilcox (Oracle) if (!folio) 24610f079694SMike Rapoport goto out_unacct_blocks; 24624c27fe4cSMike Rapoport 24633460f6e5SAxel Rasmussen if (!zeropage) { /* COPY */ 24647a7256d5SMatthew Wilcox (Oracle) page_kaddr = kmap_local_folio(folio, 0); 24655dc21f0cSIra Weiny /* 24665dc21f0cSIra Weiny * The read mmap_lock is held here. Despite the 24675dc21f0cSIra Weiny * mmap_lock being read recursive a deadlock is still 24685dc21f0cSIra Weiny * possible if a writer has taken a lock. For example: 24695dc21f0cSIra Weiny * 24705dc21f0cSIra Weiny * process A thread 1 takes read lock on own mmap_lock 24715dc21f0cSIra Weiny * process A thread 2 calls mmap, blocks taking write lock 24725dc21f0cSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock 24735dc21f0cSIra Weiny * process B thread 2 calls mmap, blocks taking write lock 24745dc21f0cSIra Weiny * process A thread 1 blocks taking read lock on process B 24755dc21f0cSIra Weiny * process B thread 1 blocks taking read lock on process A 24765dc21f0cSIra Weiny * 24775dc21f0cSIra Weiny * Disable page faults to prevent potential deadlock 24785dc21f0cSIra Weiny * and retry the copy outside the mmap_lock. 24795dc21f0cSIra Weiny */ 24805dc21f0cSIra Weiny pagefault_disable(); 24818d103963SMike Rapoport ret = copy_from_user(page_kaddr, 24828d103963SMike Rapoport (const void __user *)src_addr, 24834c27fe4cSMike Rapoport PAGE_SIZE); 24845dc21f0cSIra Weiny pagefault_enable(); 24857a7256d5SMatthew Wilcox (Oracle) kunmap_local(page_kaddr); 24864c27fe4cSMike Rapoport 2487c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */ 24884c27fe4cSMike Rapoport if (unlikely(ret)) { 24897a7256d5SMatthew Wilcox (Oracle) *pagep = &folio->page; 24907d64ae3aSAxel Rasmussen ret = -ENOENT; 24914c27fe4cSMike Rapoport /* don't free the page */ 24927d64ae3aSAxel Rasmussen goto out_unacct_blocks; 24934c27fe4cSMike Rapoport } 249419b482c2SMuchun Song 24957a7256d5SMatthew Wilcox (Oracle) flush_dcache_folio(folio); 24963460f6e5SAxel Rasmussen } else { /* ZEROPAGE */ 24977a7256d5SMatthew Wilcox (Oracle) clear_user_highpage(&folio->page, dst_addr); 24988d103963SMike Rapoport } 24994c27fe4cSMike Rapoport } else { 25007a7256d5SMatthew Wilcox (Oracle) folio = page_folio(*pagep); 25017a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 25024c27fe4cSMike Rapoport *pagep = NULL; 25034c27fe4cSMike Rapoport } 25044c27fe4cSMike Rapoport 25057a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_locked(folio)); 25067a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_swapbacked(folio)); 25077a7256d5SMatthew Wilcox (Oracle) __folio_set_locked(folio); 25087a7256d5SMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 25097a7256d5SMatthew Wilcox (Oracle) __folio_mark_uptodate(folio); 25109cc90c66SAndrea Arcangeli 2511e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2512e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 25133460f6e5SAxel Rasmussen if 
(unlikely(pgoff >= max_off)) 2514e2a50c1fSAndrea Arcangeli goto out_release; 2515e2a50c1fSAndrea Arcangeli 2516b7dd44a1SMatthew Wilcox (Oracle) ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, 25173fea5a49SJohannes Weiner gfp & GFP_RECLAIM_MASK, dst_mm); 25184c27fe4cSMike Rapoport if (ret) 25194c27fe4cSMike Rapoport goto out_release; 25204c27fe4cSMike Rapoport 25217d64ae3aSAxel Rasmussen ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, 25227a7256d5SMatthew Wilcox (Oracle) &folio->page, true, wp_copy); 25237d64ae3aSAxel Rasmussen if (ret) 25247d64ae3aSAxel Rasmussen goto out_delete_from_cache; 25254c27fe4cSMike Rapoport 252694b7cc01SYang Shi spin_lock_irq(&info->lock); 25274c27fe4cSMike Rapoport info->alloced++; 25284c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 25294c27fe4cSMike Rapoport shmem_recalc_inode(inode); 253094b7cc01SYang Shi spin_unlock_irq(&info->lock); 25314c27fe4cSMike Rapoport 25327a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 25337d64ae3aSAxel Rasmussen return 0; 25347d64ae3aSAxel Rasmussen out_delete_from_cache: 25357a7256d5SMatthew Wilcox (Oracle) filemap_remove_folio(folio); 25364c27fe4cSMike Rapoport out_release: 25377a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 25387a7256d5SMatthew Wilcox (Oracle) folio_put(folio); 25394c27fe4cSMike Rapoport out_unacct_blocks: 25400f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 25417d64ae3aSAxel Rasmussen return ret; 25424c27fe4cSMike Rapoport } 25433460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */ 25448d103963SMike Rapoport 25451da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 254692e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 254769f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 25481da177e4SLinus Torvalds 25491da177e4SLinus Torvalds static int 2550800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 25519d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 2552800d15a5SNick Piggin struct page **pagep, void **fsdata) 25531da177e4SLinus Torvalds { 2554800d15a5SNick Piggin struct inode *inode = mapping->host; 255540e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 255609cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2557eff1f906SMatthew Wilcox (Oracle) struct folio *folio; 2558a7605426SYang Shi int ret = 0; 255940e041a2SDavid Herrmann 25609608703eSJan Kara /* i_rwsem is held by caller */ 2561ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW | 2562ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2563ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 256440e041a2SDavid Herrmann return -EPERM; 256540e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 256640e041a2SDavid Herrmann return -EPERM; 256740e041a2SDavid Herrmann } 256840e041a2SDavid Herrmann 2569eff1f906SMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); 2570a7605426SYang Shi 2571a7605426SYang Shi if (ret) 2572a7605426SYang Shi return ret; 2573a7605426SYang Shi 2574eff1f906SMatthew Wilcox (Oracle) *pagep = folio_file_page(folio, index); 2575a7605426SYang Shi if (PageHWPoison(*pagep)) { 2576eff1f906SMatthew Wilcox (Oracle) folio_unlock(folio); 2577eff1f906SMatthew Wilcox (Oracle) folio_put(folio); 2578a7605426SYang Shi *pagep = NULL; 2579a7605426SYang Shi return -EIO; 2580a7605426SYang Shi } 2581a7605426SYang Shi 2582a7605426SYang Shi return 0; 2583800d15a5SNick Piggin } 2584800d15a5SNick Piggin 2585800d15a5SNick Piggin static int 2586800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2587800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2588800d15a5SNick Piggin struct page *page, void *fsdata) 2589800d15a5SNick Piggin { 259069bbb87bSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2591800d15a5SNick Piggin struct inode *inode = mapping->host; 2592800d15a5SNick Piggin 2593800d15a5SNick Piggin if (pos + copied > inode->i_size) 2594800d15a5SNick Piggin i_size_write(inode, pos + copied); 2595800d15a5SNick Piggin 259669bbb87bSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 259769bbb87bSMatthew Wilcox (Oracle) if (copied < folio_size(folio)) { 259869bbb87bSMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos); 259969bbb87bSMatthew Wilcox (Oracle) folio_zero_segments(folio, 0, from, 260069bbb87bSMatthew Wilcox (Oracle) from + copied, folio_size(folio)); 2601800d8c63SKirill A. Shutemov } 260269bbb87bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 2603800d8c63SKirill A. Shutemov } 260469bbb87bSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 260569bbb87bSMatthew Wilcox (Oracle) folio_unlock(folio); 260669bbb87bSMatthew Wilcox (Oracle) folio_put(folio); 2607d3602444SHugh Dickins 2608800d15a5SNick Piggin return copied; 26091da177e4SLinus Torvalds } 26101da177e4SLinus Torvalds 26112ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 26121da177e4SLinus Torvalds { 26136e58e79dSAl Viro struct file *file = iocb->ki_filp; 26146e58e79dSAl Viro struct inode *inode = file_inode(file); 26151da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 261641ffe5d5SHugh Dickins pgoff_t index; 261741ffe5d5SHugh Dickins unsigned long offset; 2618f7c1d074SGeert Uytterhoeven int error = 0; 2619cb66a7a1SAl Viro ssize_t retval = 0; 26206e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2621a0ee5ec5SHugh Dickins 262209cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 262309cbfeafSKirill A. 
Shutemov offset = *ppos & ~PAGE_MASK; 26241da177e4SLinus Torvalds 26251da177e4SLinus Torvalds for (;;) { 26264601e2fcSMatthew Wilcox (Oracle) struct folio *folio = NULL; 26271da177e4SLinus Torvalds struct page *page = NULL; 262841ffe5d5SHugh Dickins pgoff_t end_index; 262941ffe5d5SHugh Dickins unsigned long nr, ret; 26301da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 26311da177e4SLinus Torvalds 263209cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 26331da177e4SLinus Torvalds if (index > end_index) 26341da177e4SLinus Torvalds break; 26351da177e4SLinus Torvalds if (index == end_index) { 263609cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26371da177e4SLinus Torvalds if (nr <= offset) 26381da177e4SLinus Torvalds break; 26391da177e4SLinus Torvalds } 26401da177e4SLinus Torvalds 26414601e2fcSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, SGP_READ); 26426e58e79dSAl Viro if (error) { 26436e58e79dSAl Viro if (error == -EINVAL) 26446e58e79dSAl Viro error = 0; 26451da177e4SLinus Torvalds break; 26461da177e4SLinus Torvalds } 26474601e2fcSMatthew Wilcox (Oracle) if (folio) { 26484601e2fcSMatthew Wilcox (Oracle) folio_unlock(folio); 2649a7605426SYang Shi 26504601e2fcSMatthew Wilcox (Oracle) page = folio_file_page(folio, index); 2651a7605426SYang Shi if (PageHWPoison(page)) { 26524601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 2653a7605426SYang Shi error = -EIO; 2654a7605426SYang Shi break; 2655a7605426SYang Shi } 265675edd345SHugh Dickins } 26571da177e4SLinus Torvalds 26581da177e4SLinus Torvalds /* 26591da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 26609608703eSJan Kara * are called without i_rwsem protection against truncate 26611da177e4SLinus Torvalds */ 266209cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 26631da177e4SLinus Torvalds i_size = i_size_read(inode); 266409cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 26651da177e4SLinus Torvalds if (index == end_index) { 266609cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 26671da177e4SLinus Torvalds if (nr <= offset) { 26684601e2fcSMatthew Wilcox (Oracle) if (folio) 26694601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 26701da177e4SLinus Torvalds break; 26711da177e4SLinus Torvalds } 26721da177e4SLinus Torvalds } 26731da177e4SLinus Torvalds nr -= offset; 26741da177e4SLinus Torvalds 26754601e2fcSMatthew Wilcox (Oracle) if (folio) { 26761da177e4SLinus Torvalds /* 26771da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 26781da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 26791da177e4SLinus Torvalds * before reading the page on the kernel side. 26801da177e4SLinus Torvalds */ 26811da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 26821da177e4SLinus Torvalds flush_dcache_page(page); 26831da177e4SLinus Torvalds /* 26841da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 26851da177e4SLinus Torvalds */ 26861da177e4SLinus Torvalds if (!offset) 26874601e2fcSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 26881da177e4SLinus Torvalds /* 26891da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 26901da177e4SLinus Torvalds * now we can copy it to user space... 
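 * (Worked example, assuming 4K pages: a 6000-byte read at *ppos 1000
 * copies nr = 4096 - 1000 = 3096 bytes from page 0 first; the updates
 * below then give offset = 0, index = 1, and the next pass copies the
 * remaining 2904 bytes.)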
26911da177e4SLinus Torvalds */ 26922ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 26934601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 26941bdec44bSHugh Dickins 2695fcb14cb1SAl Viro } else if (user_backed_iter(to)) { 26961bdec44bSHugh Dickins /* 26971bdec44bSHugh Dickins * Copy to user tends to be so well optimized, but 26981bdec44bSHugh Dickins * clear_user() not so much, that it is noticeably 26991bdec44bSHugh Dickins * faster to copy the zero page instead of clearing. 27001bdec44bSHugh Dickins */ 27011bdec44bSHugh Dickins ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); 27021bdec44bSHugh Dickins } else { 27031bdec44bSHugh Dickins /* 27041bdec44bSHugh Dickins * But submitting the same page twice in a row to 27051bdec44bSHugh Dickins * splice() - or others? - can result in confusion: 27061bdec44bSHugh Dickins * so don't attempt that optimization on pipes etc. 27071bdec44bSHugh Dickins */ 27081bdec44bSHugh Dickins ret = iov_iter_zero(nr, to); 27091bdec44bSHugh Dickins } 27101bdec44bSHugh Dickins 27116e58e79dSAl Viro retval += ret; 27121da177e4SLinus Torvalds offset += ret; 271309cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 271409cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 27151da177e4SLinus Torvalds 27162ba5bbedSAl Viro if (!iov_iter_count(to)) 27171da177e4SLinus Torvalds break; 27186e58e79dSAl Viro if (ret < nr) { 27196e58e79dSAl Viro error = -EFAULT; 27206e58e79dSAl Viro break; 27216e58e79dSAl Viro } 27221da177e4SLinus Torvalds cond_resched(); 27231da177e4SLinus Torvalds } 27241da177e4SLinus Torvalds 272509cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 27266e58e79dSAl Viro file_accessed(file); 27276e58e79dSAl Viro return retval ? retval : error; 27281da177e4SLinus Torvalds } 27291da177e4SLinus Torvalds 2730965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2731220f2ac9SHugh Dickins { 2732220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2733220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2734220f2ac9SHugh Dickins 2735965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2736965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2737220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 273841139aa4SMatthew Wilcox (Oracle) if (offset < 0) 273941139aa4SMatthew Wilcox (Oracle) return -ENXIO; 274041139aa4SMatthew Wilcox (Oracle) 27415955102cSAl Viro inode_lock(inode); 27429608703eSJan Kara /* We're holding i_rwsem so we can access i_size directly */ 274341139aa4SMatthew Wilcox (Oracle) offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 2744387aae6fSHugh Dickins if (offset >= 0) 274546a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 27465955102cSAl Viro inode_unlock(inode); 2747220f2ac9SHugh Dickins return offset; 2748220f2ac9SHugh Dickins } 2749220f2ac9SHugh Dickins 275083e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 275183e4fa9cSHugh Dickins loff_t len) 275283e4fa9cSHugh Dickins { 2753496ad9aaSAl Viro struct inode *inode = file_inode(file); 2754e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 275540e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 27561aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2757d144bf62SHugh Dickins pgoff_t start, index, end, undo_fallocend; 2758e2d12e22SHugh Dickins int error; 275983e4fa9cSHugh Dickins 276013ace4d0SHugh Dickins if 
(mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 276113ace4d0SHugh Dickins return -EOPNOTSUPP; 276213ace4d0SHugh Dickins 27635955102cSAl Viro inode_lock(inode); 276483e4fa9cSHugh Dickins 276583e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 276683e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 276783e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 276883e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 27698e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 277083e4fa9cSHugh Dickins 27719608703eSJan Kara /* protected by i_rwsem */ 2772ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 277340e041a2SDavid Herrmann error = -EPERM; 277440e041a2SDavid Herrmann goto out; 277540e041a2SDavid Herrmann } 277640e041a2SDavid Herrmann 27778e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2778aa71ecd8SChen Jun shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 2779f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2780f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2781f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2782f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2783f00cdc6dSHugh Dickins 278483e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 278583e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 278683e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 278783e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 278883e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 27898e205f77SHugh Dickins 27908e205f77SHugh Dickins spin_lock(&inode->i_lock); 27918e205f77SHugh Dickins inode->i_private = NULL; 27928e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 27932055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 27948e205f77SHugh Dickins spin_unlock(&inode->i_lock); 279583e4fa9cSHugh Dickins error = 0; 27968e205f77SHugh Dickins goto out; 279783e4fa9cSHugh Dickins } 279883e4fa9cSHugh Dickins 2799e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2800e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2801e2d12e22SHugh Dickins if (error) 2802e2d12e22SHugh Dickins goto out; 2803e2d12e22SHugh Dickins 280440e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 280540e041a2SDavid Herrmann error = -EPERM; 280640e041a2SDavid Herrmann goto out; 280740e041a2SDavid Herrmann } 280840e041a2SDavid Herrmann 280909cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 281009cbfeafSKirill A. 
Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2811e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2812e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2813e2d12e22SHugh Dickins error = -ENOSPC; 2814e2d12e22SHugh Dickins goto out; 2815e2d12e22SHugh Dickins } 2816e2d12e22SHugh Dickins 28178e205f77SHugh Dickins shmem_falloc.waitq = NULL; 28181aac1400SHugh Dickins shmem_falloc.start = start; 28191aac1400SHugh Dickins shmem_falloc.next = start; 28201aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 28211aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 28221aac1400SHugh Dickins spin_lock(&inode->i_lock); 28231aac1400SHugh Dickins inode->i_private = &shmem_falloc; 28241aac1400SHugh Dickins spin_unlock(&inode->i_lock); 28251aac1400SHugh Dickins 2826d144bf62SHugh Dickins /* 2827d144bf62SHugh Dickins * info->fallocend is only relevant when huge pages might be 2828d144bf62SHugh Dickins * involved: to prevent split_huge_page() freeing fallocated 2829d144bf62SHugh Dickins * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. 2830d144bf62SHugh Dickins */ 2831d144bf62SHugh Dickins undo_fallocend = info->fallocend; 2832d144bf62SHugh Dickins if (info->fallocend < end) 2833d144bf62SHugh Dickins info->fallocend = end; 2834d144bf62SHugh Dickins 2835050dcb5cSHugh Dickins for (index = start; index < end; ) { 2836b0802b22SMatthew Wilcox (Oracle) struct folio *folio; 2837e2d12e22SHugh Dickins 2838e2d12e22SHugh Dickins /* 2839e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2840e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2841e2d12e22SHugh Dickins */ 2842e2d12e22SHugh Dickins if (signal_pending(current)) 2843e2d12e22SHugh Dickins error = -EINTR; 28441aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 28451aac1400SHugh Dickins error = -ENOMEM; 2846e2d12e22SHugh Dickins else 2847b0802b22SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, 2848b0802b22SMatthew Wilcox (Oracle) SGP_FALLOC); 2849e2d12e22SHugh Dickins if (error) { 2850d144bf62SHugh Dickins info->fallocend = undo_fallocend; 2851b0802b22SMatthew Wilcox (Oracle) /* Remove the !uptodate folios we added */ 28527f556567SHugh Dickins if (index > start) { 28531635f6a7SHugh Dickins shmem_undo_range(inode, 285409cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2855b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 28567f556567SHugh Dickins } 28571aac1400SHugh Dickins goto undone; 2858e2d12e22SHugh Dickins } 2859e2d12e22SHugh Dickins 2860050dcb5cSHugh Dickins /* 2861050dcb5cSHugh Dickins * Here is a more important optimization than it appears: 2862b0802b22SMatthew Wilcox (Oracle) * a second SGP_FALLOC on the same large folio will clear it, 2863b0802b22SMatthew Wilcox (Oracle) * making it uptodate and un-undoable if we fail later. 2864050dcb5cSHugh Dickins */ 2865b0802b22SMatthew Wilcox (Oracle) index = folio_next_index(folio); 2866050dcb5cSHugh Dickins /* Beware 32-bit wraparound */ 2867050dcb5cSHugh Dickins if (!index) 2868050dcb5cSHugh Dickins index--; 2869050dcb5cSHugh Dickins 2870e2d12e22SHugh Dickins /* 28711aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 28721aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 
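 * (The other side: shmem_writepage() bumps shmem_falloc.nr_unswapped
 * when it declines to swap one of our not-yet-written folios, and the
 * check above gives up with -ENOMEM once nr_unswapped overtakes
 * nr_falloced.)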
28731aac1400SHugh Dickins */ 2874b0802b22SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) 2875050dcb5cSHugh Dickins shmem_falloc.nr_falloced += index - shmem_falloc.next; 2876050dcb5cSHugh Dickins shmem_falloc.next = index; 28771aac1400SHugh Dickins 28781aac1400SHugh Dickins /* 2879b0802b22SMatthew Wilcox (Oracle) * If !uptodate, leave it that way so that freeable folios 28801635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 2881b0802b22SMatthew Wilcox (Oracle) * But mark it dirty so that memory pressure will swap rather 2882b0802b22SMatthew Wilcox (Oracle) * than free the folios we are allocating (and SGP_CACHE folios 2883e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2884e2d12e22SHugh Dickins */ 2885b0802b22SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2886b0802b22SMatthew Wilcox (Oracle) folio_unlock(folio); 2887b0802b22SMatthew Wilcox (Oracle) folio_put(folio); 2888e2d12e22SHugh Dickins cond_resched(); 2889e2d12e22SHugh Dickins } 2890e2d12e22SHugh Dickins 2891e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2892e2d12e22SHugh Dickins i_size_write(inode, offset + len); 28931aac1400SHugh Dickins undone: 28941aac1400SHugh Dickins spin_lock(&inode->i_lock); 28951aac1400SHugh Dickins inode->i_private = NULL; 28961aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2897e2d12e22SHugh Dickins out: 289815f242bbSHugh Dickins if (!error) 289915f242bbSHugh Dickins file_modified(file); 29005955102cSAl Viro inode_unlock(inode); 290183e4fa9cSHugh Dickins return error; 290283e4fa9cSHugh Dickins } 290383e4fa9cSHugh Dickins 2904726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 29051da177e4SLinus Torvalds { 2906726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 29071da177e4SLinus Torvalds 29081da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 290909cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 29101da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 29110edd73b3SHugh Dickins if (sbinfo->max_blocks) { 29121da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 291341ffe5d5SHugh Dickins buf->f_bavail = 291441ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 291541ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 29160edd73b3SHugh Dickins } 29170edd73b3SHugh Dickins if (sbinfo->max_inodes) { 29181da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 29191da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 29201da177e4SLinus Torvalds } 29211da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 292259cda49eSAmir Goldstein 292359cda49eSAmir Goldstein buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 292459cda49eSAmir Goldstein 29251da177e4SLinus Torvalds return 0; 29261da177e4SLinus Torvalds } 29271da177e4SLinus Torvalds 29281da177e4SLinus Torvalds /* 29291da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
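 * (For instance, with a tmpfs mounted at a hypothetical /tmp, both
 *	open("/tmp/f", O_CREAT | O_RDWR, 0644);
 *	mknod("/tmp/p", S_IFIFO | 0600, 0);
 * end up in shmem_mknod() below; the open() gets there via
 * shmem_create().)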
29301da177e4SLinus Torvalds */ 29311da177e4SLinus Torvalds static int 29325ebb29beSChristian Brauner shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, 2933549c7297SChristian Brauner struct dentry *dentry, umode_t mode, dev_t dev) 29341da177e4SLinus Torvalds { 29350b0a0806SHugh Dickins struct inode *inode; 29361da177e4SLinus Torvalds int error = -ENOSPC; 29371da177e4SLinus Torvalds 29387a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); 29391da177e4SLinus Torvalds if (inode) { 2940feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2941feda821eSChristoph Hellwig if (error) 2942feda821eSChristoph Hellwig goto out_iput; 29432a7dba39SEric Paris error = security_inode_init_security(inode, dir, 29449d8f13baSMimi Zohar &dentry->d_name, 29456d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2946feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2947feda821eSChristoph Hellwig goto out_iput; 294837ec43cdSMimi Zohar 2949718deb6bSAl Viro error = 0; 29501da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2951078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 295236f05cabSJeff Layton inode_inc_iversion(dir); 29531da177e4SLinus Torvalds d_instantiate(dentry, inode); 29541da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 29551da177e4SLinus Torvalds } 29561da177e4SLinus Torvalds return error; 2957feda821eSChristoph Hellwig out_iput: 2958feda821eSChristoph Hellwig iput(inode); 2959feda821eSChristoph Hellwig return error; 29601da177e4SLinus Torvalds } 29611da177e4SLinus Torvalds 296260545d0dSAl Viro static int 2963011e2b71SChristian Brauner shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 2964863f144fSMiklos Szeredi struct file *file, umode_t mode) 296560545d0dSAl Viro { 296660545d0dSAl Viro struct inode *inode; 296760545d0dSAl Viro int error = -ENOSPC; 296860545d0dSAl Viro 29697a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); 297060545d0dSAl Viro if (inode) { 297160545d0dSAl Viro error = security_inode_init_security(inode, dir, 297260545d0dSAl Viro NULL, 297360545d0dSAl Viro shmem_initxattrs, NULL); 2974feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2975feda821eSChristoph Hellwig goto out_iput; 2976feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2977feda821eSChristoph Hellwig if (error) 2978feda821eSChristoph Hellwig goto out_iput; 2979863f144fSMiklos Szeredi d_tmpfile(file, inode); 298060545d0dSAl Viro } 2981863f144fSMiklos Szeredi return finish_open_simple(file, error); 2982feda821eSChristoph Hellwig out_iput: 2983feda821eSChristoph Hellwig iput(inode); 2984feda821eSChristoph Hellwig return error; 298560545d0dSAl Viro } 298660545d0dSAl Viro 2987c54bd91eSChristian Brauner static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, 2988549c7297SChristian Brauner struct dentry *dentry, umode_t mode) 29891da177e4SLinus Torvalds { 29901da177e4SLinus Torvalds int error; 29911da177e4SLinus Torvalds 29927a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); 29937a80e5b8SGiuseppe Scrivano if (error) 29941da177e4SLinus Torvalds return error; 2995d8c76e6fSDave Hansen inc_nlink(dir); 29961da177e4SLinus Torvalds return 0; 29971da177e4SLinus Torvalds } 29981da177e4SLinus Torvalds 29996c960e68SChristian Brauner static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, 3000549c7297SChristian Brauner struct dentry *dentry, umode_t mode, bool excl) 
30011da177e4SLinus Torvalds { 30027a80e5b8SGiuseppe Scrivano return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0); 30031da177e4SLinus Torvalds } 30041da177e4SLinus Torvalds 30051da177e4SLinus Torvalds /* 30061da177e4SLinus Torvalds * Link a file.. 30071da177e4SLinus Torvalds */ 30081da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 30091da177e4SLinus Torvalds { 301075c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 301129b00e60SDarrick J. Wong int ret = 0; 30121da177e4SLinus Torvalds 30131da177e4SLinus Torvalds /* 30141da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 30151da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 30161da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 30171062af92SDarrick J. Wong * But if an O_TMPFILE file is linked into the tmpfs, the 30181062af92SDarrick J. Wong * first link must skip that, to get the accounting right. 30191da177e4SLinus Torvalds */ 30201062af92SDarrick J. Wong if (inode->i_nlink) { 3021e809d5f0SChris Down ret = shmem_reserve_inode(inode->i_sb, NULL); 30225b04c689SPavel Emelyanov if (ret) 30235b04c689SPavel Emelyanov goto out; 30241062af92SDarrick J. Wong } 30251da177e4SLinus Torvalds 30261da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3027078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 302836f05cabSJeff Layton inode_inc_iversion(dir); 3029d8c76e6fSDave Hansen inc_nlink(inode); 30307de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 30311da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 30321da177e4SLinus Torvalds d_instantiate(dentry, inode); 30335b04c689SPavel Emelyanov out: 30345b04c689SPavel Emelyanov return ret; 30351da177e4SLinus Torvalds } 30361da177e4SLinus Torvalds 30371da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 30381da177e4SLinus Torvalds { 303975c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 30401da177e4SLinus Torvalds 30415b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 30425b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 30431da177e4SLinus Torvalds 30441da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 3045078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 304636f05cabSJeff Layton inode_inc_iversion(dir); 30479a53c3a7SDave Hansen drop_nlink(inode); 30481da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 30491da177e4SLinus Torvalds return 0; 30501da177e4SLinus Torvalds } 30511da177e4SLinus Torvalds 30521da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 30531da177e4SLinus Torvalds { 30541da177e4SLinus Torvalds if (!simple_empty(dentry)) 30551da177e4SLinus Torvalds return -ENOTEMPTY; 30561da177e4SLinus Torvalds 305775c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 30589a53c3a7SDave Hansen drop_nlink(dir); 30591da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 30601da177e4SLinus Torvalds } 30611da177e4SLinus Torvalds 3062e18275aeSChristian Brauner static int shmem_whiteout(struct mnt_idmap *idmap, 3063549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry) 306446fdb794SMiklos Szeredi { 306546fdb794SMiklos Szeredi struct dentry *whiteout; 306646fdb794SMiklos Szeredi int error; 306746fdb794SMiklos Szeredi 
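	/*
	 * (Editorial sketch: we are reached from shmem_rename2() for
	 * renameat2(2) with RENAME_WHITEOUT, e.g. with hypothetical paths
	 *	renameat2(AT_FDCWD, "old", AT_FDCWD, "new",
	 *		  RENAME_WHITEOUT);
	 * which must leave a (0,0) char-device whiteout at "old".)
	 */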
306846fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 306946fdb794SMiklos Szeredi if (!whiteout) 307046fdb794SMiklos Szeredi return -ENOMEM; 307146fdb794SMiklos Szeredi 30727a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, old_dir, whiteout, 307346fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 307446fdb794SMiklos Szeredi dput(whiteout); 307546fdb794SMiklos Szeredi if (error) 307646fdb794SMiklos Szeredi return error; 307746fdb794SMiklos Szeredi 307846fdb794SMiklos Szeredi /* 307946fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 308046fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 308146fdb794SMiklos Szeredi * 308246fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 308346fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 308446fdb794SMiklos Szeredi */ 308546fdb794SMiklos Szeredi d_rehash(whiteout); 308646fdb794SMiklos Szeredi return 0; 308746fdb794SMiklos Szeredi } 308846fdb794SMiklos Szeredi 30891da177e4SLinus Torvalds /* 30901da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename, 30911da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 30921da177e4SLinus Torvalds * it exists so that the VFS layer correctly free's it when it 30931da177e4SLinus Torvalds * gets overwritten. 30941da177e4SLinus Torvalds */ 3095e18275aeSChristian Brauner static int shmem_rename2(struct mnt_idmap *idmap, 3096549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry, 3097549c7297SChristian Brauner struct inode *new_dir, struct dentry *new_dentry, 3098549c7297SChristian Brauner unsigned int flags) 30991da177e4SLinus Torvalds { 310075c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 31011da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 31021da177e4SLinus Torvalds 310346fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 31043b69ff51SMiklos Szeredi return -EINVAL; 31053b69ff51SMiklos Szeredi 310637456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 31076429e463SLorenz Bauer return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry); 310837456771SMiklos Szeredi 31091da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 31101da177e4SLinus Torvalds return -ENOTEMPTY; 31111da177e4SLinus Torvalds 311246fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 311346fdb794SMiklos Szeredi int error; 311446fdb794SMiklos Szeredi 31157a80e5b8SGiuseppe Scrivano error = shmem_whiteout(idmap, old_dir, old_dentry); 311646fdb794SMiklos Szeredi if (error) 311746fdb794SMiklos Szeredi return error; 311846fdb794SMiklos Szeredi } 311946fdb794SMiklos Szeredi 312075c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 31211da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3122b928095bSMiklos Szeredi if (they_are_dirs) { 312375c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 31249a53c3a7SDave Hansen drop_nlink(old_dir); 3125b928095bSMiklos Szeredi } 31261da177e4SLinus Torvalds } else if (they_are_dirs) { 31279a53c3a7SDave Hansen drop_nlink(old_dir); 3128d8c76e6fSDave Hansen inc_nlink(new_dir); 31291da177e4SLinus Torvalds } 31301da177e4SLinus Torvalds 31311da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 31321da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 31331da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 31341da177e4SLinus Torvalds 
new_dir->i_ctime = new_dir->i_mtime = 3135078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 313636f05cabSJeff Layton inode_inc_iversion(old_dir); 313736f05cabSJeff Layton inode_inc_iversion(new_dir); 31381da177e4SLinus Torvalds return 0; 31391da177e4SLinus Torvalds } 31401da177e4SLinus Torvalds 31417a77db95SChristian Brauner static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, 3142549c7297SChristian Brauner struct dentry *dentry, const char *symname) 31431da177e4SLinus Torvalds { 31441da177e4SLinus Torvalds int error; 31451da177e4SLinus Torvalds int len; 31461da177e4SLinus Torvalds struct inode *inode; 31477ad0414bSMatthew Wilcox (Oracle) struct folio *folio; 31481da177e4SLinus Torvalds 31491da177e4SLinus Torvalds len = strlen(symname) + 1; 315009cbfeafSKirill A. Shutemov if (len > PAGE_SIZE) 31511da177e4SLinus Torvalds return -ENAMETOOLONG; 31521da177e4SLinus Torvalds 31537a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, 31540825a6f9SJoe Perches VM_NORESERVE); 31551da177e4SLinus Torvalds if (!inode) 31561da177e4SLinus Torvalds return -ENOSPC; 31571da177e4SLinus Torvalds 31589d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 31596d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3160343c3d7fSMateusz Nosek if (error && error != -EOPNOTSUPP) { 3161570bc1c2SStephen Smalley iput(inode); 3162570bc1c2SStephen Smalley return error; 3163570bc1c2SStephen Smalley } 3164570bc1c2SStephen Smalley 31651da177e4SLinus Torvalds inode->i_size = len-1; 316669f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 31673ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 31683ed47db3SAl Viro if (!inode->i_link) { 316969f07ec9SHugh Dickins iput(inode); 317069f07ec9SHugh Dickins return -ENOMEM; 317169f07ec9SHugh Dickins } 317269f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 31731da177e4SLinus Torvalds } else { 3174e8ecde25SAl Viro inode_nohighmem(inode); 31757ad0414bSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); 31761da177e4SLinus Torvalds if (error) { 31771da177e4SLinus Torvalds iput(inode); 31781da177e4SLinus Torvalds return error; 31791da177e4SLinus Torvalds } 318014fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 31811da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 31827ad0414bSMatthew Wilcox (Oracle) memcpy(folio_address(folio), symname, len); 31837ad0414bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 31847ad0414bSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 31857ad0414bSMatthew Wilcox (Oracle) folio_unlock(folio); 31867ad0414bSMatthew Wilcox (Oracle) folio_put(folio); 31871da177e4SLinus Torvalds } 31881da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3189078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 319036f05cabSJeff Layton inode_inc_iversion(dir); 31911da177e4SLinus Torvalds d_instantiate(dentry, inode); 31921da177e4SLinus Torvalds dget(dentry); 31931da177e4SLinus Torvalds return 0; 31941da177e4SLinus Torvalds } 31951da177e4SLinus Torvalds 3196fceef393SAl Viro static void shmem_put_link(void *arg) 3197fceef393SAl Viro { 3198e4b57722SMatthew Wilcox (Oracle) folio_mark_accessed(arg); 3199e4b57722SMatthew Wilcox (Oracle) folio_put(arg); 3200fceef393SAl Viro } 3201fceef393SAl Viro 32026b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3203fceef393SAl Viro struct inode *inode, 3204fceef393SAl Viro struct delayed_call *done) 32051da177e4SLinus 
Torvalds { 3206e4b57722SMatthew Wilcox (Oracle) struct folio *folio = NULL; 32076b255391SAl Viro int error; 3208e4b57722SMatthew Wilcox (Oracle) 32096a6c9904SAl Viro if (!dentry) { 3210e4b57722SMatthew Wilcox (Oracle) folio = filemap_get_folio(inode->i_mapping, 0); 3211e4b57722SMatthew Wilcox (Oracle) if (!folio) 32126b255391SAl Viro return ERR_PTR(-ECHILD); 32137459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0)) || 3214e4b57722SMatthew Wilcox (Oracle) !folio_test_uptodate(folio)) { 3215e4b57722SMatthew Wilcox (Oracle) folio_put(folio); 32166a6c9904SAl Viro return ERR_PTR(-ECHILD); 32176a6c9904SAl Viro } 32186a6c9904SAl Viro } else { 3219e4b57722SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_READ); 3220680baacbSAl Viro if (error) 3221680baacbSAl Viro return ERR_PTR(error); 3222e4b57722SMatthew Wilcox (Oracle) if (!folio) 3223a7605426SYang Shi return ERR_PTR(-ECHILD); 32247459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0))) { 3225e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 3226e4b57722SMatthew Wilcox (Oracle) folio_put(folio); 3227a7605426SYang Shi return ERR_PTR(-ECHILD); 3228a7605426SYang Shi } 3229e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 32301da177e4SLinus Torvalds } 3231e4b57722SMatthew Wilcox (Oracle) set_delayed_call(done, shmem_put_link, folio); 3232e4b57722SMatthew Wilcox (Oracle) return folio_address(folio); 32331da177e4SLinus Torvalds } 32341da177e4SLinus Torvalds 3235b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3236e408e695STheodore Ts'o 3237e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa) 3238e408e695STheodore Ts'o { 3239e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3240e408e695STheodore Ts'o 3241e408e695STheodore Ts'o fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); 3242e408e695STheodore Ts'o 3243e408e695STheodore Ts'o return 0; 3244e408e695STheodore Ts'o } 3245e408e695STheodore Ts'o 32468782a9aeSChristian Brauner static int shmem_fileattr_set(struct mnt_idmap *idmap, 3247e408e695STheodore Ts'o struct dentry *dentry, struct fileattr *fa) 3248e408e695STheodore Ts'o { 3249e408e695STheodore Ts'o struct inode *inode = d_inode(dentry); 3250e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(inode); 3251e408e695STheodore Ts'o 3252e408e695STheodore Ts'o if (fileattr_has_fsx(fa)) 3253e408e695STheodore Ts'o return -EOPNOTSUPP; 3254cb241339SHugh Dickins if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) 3255cb241339SHugh Dickins return -EOPNOTSUPP; 3256e408e695STheodore Ts'o 3257e408e695STheodore Ts'o info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | 3258e408e695STheodore Ts'o (fa->flags & SHMEM_FL_USER_MODIFIABLE); 3259e408e695STheodore Ts'o 3260cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags); 3261e408e695STheodore Ts'o inode->i_ctime = current_time(inode); 326236f05cabSJeff Layton inode_inc_iversion(inode); 3263e408e695STheodore Ts'o return 0; 3264e408e695STheodore Ts'o } 3265e408e695STheodore Ts'o 3266b09e0fa4SEric Paris /* 3267b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3268b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3269b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3270b09e0fa4SEric Paris * filesystem level, though. 
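 *
 * As an illustrative sketch (assumed userspace caller, not part of this
 * file): once sb->s_xattr is wired up to shmem_xattr_handlers below, a
 * call such as
 *
 *	setxattr("/dev/shm/f", "trusted.foo", "bar", 3, 0);
 *
 * is dispatched by prefix match to shmem_xattr_handler_set(), rather
 * than depending on the LSM-only fallback.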
3271b09e0fa4SEric Paris */ 3272b09e0fa4SEric Paris 32736d9d88d0SJarkko Sakkinen /* 32746d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 32756d9d88d0SJarkko Sakkinen */ 32766d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 32776d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 32786d9d88d0SJarkko Sakkinen void *fs_info) 32796d9d88d0SJarkko Sakkinen { 32806d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 32816d9d88d0SJarkko Sakkinen const struct xattr *xattr; 328238f38657SAristeu Rozanski struct simple_xattr *new_xattr; 32836d9d88d0SJarkko Sakkinen size_t len; 32846d9d88d0SJarkko Sakkinen 32856d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 328638f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 32876d9d88d0SJarkko Sakkinen if (!new_xattr) 32886d9d88d0SJarkko Sakkinen return -ENOMEM; 32896d9d88d0SJarkko Sakkinen 32906d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 32916d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 32926d9d88d0SJarkko Sakkinen GFP_KERNEL); 32936d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 32943bef735aSChengguang Xu kvfree(new_xattr); 32956d9d88d0SJarkko Sakkinen return -ENOMEM; 32966d9d88d0SJarkko Sakkinen } 32976d9d88d0SJarkko Sakkinen 32986d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 32996d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 33006d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 33016d9d88d0SJarkko Sakkinen xattr->name, len); 33026d9d88d0SJarkko Sakkinen 33033b4c7bc0SChristian Brauner simple_xattr_add(&info->xattrs, new_xattr); 33046d9d88d0SJarkko Sakkinen } 33056d9d88d0SJarkko Sakkinen 33066d9d88d0SJarkko Sakkinen return 0; 33076d9d88d0SJarkko Sakkinen } 33086d9d88d0SJarkko Sakkinen 3309aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3310b296821aSAl Viro struct dentry *unused, struct inode *inode, 3311b296821aSAl Viro const char *name, void *buffer, size_t size) 3312aa7c5241SAndreas Gruenbacher { 3313b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3314aa7c5241SAndreas Gruenbacher 3315aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3316aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3317aa7c5241SAndreas Gruenbacher } 3318aa7c5241SAndreas Gruenbacher 3319aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 332039f60c1cSChristian Brauner struct mnt_idmap *idmap, 332159301226SAl Viro struct dentry *unused, struct inode *inode, 332259301226SAl Viro const char *name, const void *value, 332359301226SAl Viro size_t size, int flags) 3324aa7c5241SAndreas Gruenbacher { 332559301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 332636f05cabSJeff Layton int err; 3327aa7c5241SAndreas Gruenbacher 3328aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 332936f05cabSJeff Layton err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); 333036f05cabSJeff Layton if (!err) { 333136f05cabSJeff Layton inode->i_ctime = current_time(inode); 333236f05cabSJeff Layton inode_inc_iversion(inode); 333336f05cabSJeff Layton } 333436f05cabSJeff Layton return err; 3335aa7c5241SAndreas Gruenbacher } 3336aa7c5241SAndreas Gruenbacher 3337aa7c5241SAndreas Gruenbacher static const struct xattr_handler 
shmem_security_xattr_handler = { 3338aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3339aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3340aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3341aa7c5241SAndreas Gruenbacher }; 3342aa7c5241SAndreas Gruenbacher 3343aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3344aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3345aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3346aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3347aa7c5241SAndreas Gruenbacher }; 3348aa7c5241SAndreas Gruenbacher 3349b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3350b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3351feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3352feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3353b09e0fa4SEric Paris #endif 3354aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3355aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3356b09e0fa4SEric Paris NULL 3357b09e0fa4SEric Paris }; 3358b09e0fa4SEric Paris 3359b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3360b09e0fa4SEric Paris { 336175c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3362786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3363b09e0fa4SEric Paris } 3364b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3365b09e0fa4SEric Paris 336669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 3367f7cd16a5SXavier Roche .getattr = shmem_getattr, 33686b255391SAl Viro .get_link = simple_get_link, 3369b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3370b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3371b09e0fa4SEric Paris #endif 33721da177e4SLinus Torvalds }; 33731da177e4SLinus Torvalds 337492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 3375f7cd16a5SXavier Roche .getattr = shmem_getattr, 33766b255391SAl Viro .get_link = shmem_get_link, 3377b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3378b09e0fa4SEric Paris .listxattr = shmem_listxattr, 337939f0247dSAndreas Gruenbacher #endif 3380b09e0fa4SEric Paris }; 338139f0247dSAndreas Gruenbacher 338291828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 338391828a40SDavid M. Grimes { 338491828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 338591828a40SDavid M. Grimes } 338691828a40SDavid M. Grimes 338791828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 338891828a40SDavid M. Grimes { 338991828a40SDavid M. Grimes __u32 *fh = vfh; 339091828a40SDavid M. Grimes __u64 inum = fh[2]; 339191828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 339291828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 339391828a40SDavid M. Grimes } 339491828a40SDavid M. 
Grimes 339512ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */ 339612ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode) 339712ba780dSAmir Goldstein { 339812ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode); 339912ba780dSAmir Goldstein 340012ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode); 340112ba780dSAmir Goldstein } 340212ba780dSAmir Goldstein 340312ba780dSAmir Goldstein 3404480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3405480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 340691828a40SDavid M. Grimes { 340791828a40SDavid M. Grimes struct inode *inode; 3408480b116cSChristoph Hellwig struct dentry *dentry = NULL; 340935c2a7f4SHugh Dickins u64 inum; 341091828a40SDavid M. Grimes 3411480b116cSChristoph Hellwig if (fh_len < 3) 3412480b116cSChristoph Hellwig return NULL; 3413480b116cSChristoph Hellwig 341435c2a7f4SHugh Dickins inum = fid->raw[2]; 341535c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 341635c2a7f4SHugh Dickins 3417480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3418480b116cSChristoph Hellwig shmem_match, fid->raw); 341991828a40SDavid M. Grimes if (inode) { 342012ba780dSAmir Goldstein dentry = shmem_find_alias(inode); 342191828a40SDavid M. Grimes iput(inode); 342291828a40SDavid M. Grimes } 342391828a40SDavid M. Grimes 3424480b116cSChristoph Hellwig return dentry; 342591828a40SDavid M. Grimes } 342691828a40SDavid M. Grimes 3427b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3428b0b0382bSAl Viro struct inode *parent) 342991828a40SDavid M. Grimes { 34305fe0c237SAneesh Kumar K.V if (*len < 3) { 34315fe0c237SAneesh Kumar K.V *len = 3; 343294e07a75SNamjae Jeon return FILEID_INVALID; 34335fe0c237SAneesh Kumar K.V } 343491828a40SDavid M. Grimes 34351d3382cbSAl Viro if (inode_unhashed(inode)) { 343691828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 343791828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 343891828a40SDavid M. Grimes * time, we need a lock to ensure we only try 343991828a40SDavid M. Grimes * to do it once 344091828a40SDavid M. Grimes */ 344191828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 344291828a40SDavid M. Grimes spin_lock(&lock); 34431d3382cbSAl Viro if (inode_unhashed(inode)) 344491828a40SDavid M. Grimes __insert_inode_hash(inode, 344591828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 344691828a40SDavid M. Grimes spin_unlock(&lock); 344791828a40SDavid M. Grimes } 344891828a40SDavid M. Grimes 344991828a40SDavid M. Grimes fh[0] = inode->i_generation; 345091828a40SDavid M. Grimes fh[1] = inode->i_ino; 345191828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 345291828a40SDavid M. Grimes 345391828a40SDavid M. Grimes *len = 3; 345491828a40SDavid M. Grimes return 1; 345591828a40SDavid M. Grimes } 345691828a40SDavid M. Grimes 345739655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 345891828a40SDavid M. Grimes .get_parent = shmem_get_parent, 345991828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3460480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 346191828a40SDavid M. Grimes }; 346291828a40SDavid M. 
Grimes 3463626c3920SAl Viro enum shmem_param { 3464626c3920SAl Viro Opt_gid, 3465626c3920SAl Viro Opt_huge, 3466626c3920SAl Viro Opt_mode, 3467626c3920SAl Viro Opt_mpol, 3468626c3920SAl Viro Opt_nr_blocks, 3469626c3920SAl Viro Opt_nr_inodes, 3470626c3920SAl Viro Opt_size, 3471626c3920SAl Viro Opt_uid, 3472ea3271f7SChris Down Opt_inode32, 3473ea3271f7SChris Down Opt_inode64, 34742c6efe9cSLuis Chamberlain Opt_noswap, 3475626c3920SAl Viro }; 34761da177e4SLinus Torvalds 34775eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = { 34782710c957SAl Viro {"never", SHMEM_HUGE_NEVER }, 34792710c957SAl Viro {"always", SHMEM_HUGE_ALWAYS }, 34802710c957SAl Viro {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 34812710c957SAl Viro {"advise", SHMEM_HUGE_ADVISE }, 34822710c957SAl Viro {} 34832710c957SAl Viro }; 34842710c957SAl Viro 3485d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = { 3486626c3920SAl Viro fsparam_u32 ("gid", Opt_gid), 34872710c957SAl Viro fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 3488626c3920SAl Viro fsparam_u32oct("mode", Opt_mode), 3489626c3920SAl Viro fsparam_string("mpol", Opt_mpol), 3490626c3920SAl Viro fsparam_string("nr_blocks", Opt_nr_blocks), 3491626c3920SAl Viro fsparam_string("nr_inodes", Opt_nr_inodes), 3492626c3920SAl Viro fsparam_string("size", Opt_size), 3493626c3920SAl Viro fsparam_u32 ("uid", Opt_uid), 3494ea3271f7SChris Down fsparam_flag ("inode32", Opt_inode32), 3495ea3271f7SChris Down fsparam_flag ("inode64", Opt_inode64), 34962c6efe9cSLuis Chamberlain fsparam_flag ("noswap", Opt_noswap), 3497626c3920SAl Viro {} 3498626c3920SAl Viro }; 3499626c3920SAl Viro 3500f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 3501626c3920SAl Viro { 3502f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3503626c3920SAl Viro struct fs_parse_result result; 3504e04dc423SAl Viro unsigned long long size; 3505626c3920SAl Viro char *rest; 3506626c3920SAl Viro int opt; 3507626c3920SAl Viro 3508d7167b14SAl Viro opt = fs_parse(fc, shmem_fs_parameters, param, &result); 3509f3235626SDavid Howells if (opt < 0) 3510626c3920SAl Viro return opt; 3511626c3920SAl Viro 3512626c3920SAl Viro switch (opt) { 3513626c3920SAl Viro case Opt_size: 3514626c3920SAl Viro size = memparse(param->string, &rest); 3515e04dc423SAl Viro if (*rest == '%') { 3516e04dc423SAl Viro size <<= PAGE_SHIFT; 3517e04dc423SAl Viro size *= totalram_pages(); 3518e04dc423SAl Viro do_div(size, 100); 3519e04dc423SAl Viro rest++; 3520e04dc423SAl Viro } 3521e04dc423SAl Viro if (*rest) 3522626c3920SAl Viro goto bad_value; 3523e04dc423SAl Viro ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3524e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3525626c3920SAl Viro break; 3526626c3920SAl Viro case Opt_nr_blocks: 3527626c3920SAl Viro ctx->blocks = memparse(param->string, &rest); 35280c98c8e1SZhaoLong Wang if (*rest || ctx->blocks > S64_MAX) 3529626c3920SAl Viro goto bad_value; 3530e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3531626c3920SAl Viro break; 3532626c3920SAl Viro case Opt_nr_inodes: 3533626c3920SAl Viro ctx->inodes = memparse(param->string, &rest); 3534e04dc423SAl Viro if (*rest) 3535626c3920SAl Viro goto bad_value; 3536e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_INODES; 3537626c3920SAl Viro break; 3538626c3920SAl Viro case Opt_mode: 3539626c3920SAl Viro ctx->mode = result.uint_32 & 07777; 3540626c3920SAl Viro break; 3541626c3920SAl Viro case Opt_uid: 3542626c3920SAl Viro ctx->uid = make_kuid(current_user_ns(), 
result.uint_32); 3543e04dc423SAl Viro if (!uid_valid(ctx->uid)) 3544626c3920SAl Viro goto bad_value; 3545626c3920SAl Viro break; 3546626c3920SAl Viro case Opt_gid: 3547626c3920SAl Viro ctx->gid = make_kgid(current_user_ns(), result.uint_32); 3548e04dc423SAl Viro if (!gid_valid(ctx->gid)) 3549626c3920SAl Viro goto bad_value; 3550626c3920SAl Viro break; 3551626c3920SAl Viro case Opt_huge: 3552626c3920SAl Viro ctx->huge = result.uint_32; 3553626c3920SAl Viro if (ctx->huge != SHMEM_HUGE_NEVER && 3554396bcc52SMatthew Wilcox (Oracle) !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 3555626c3920SAl Viro has_transparent_hugepage())) 3556626c3920SAl Viro goto unsupported_parameter; 3557e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_HUGE; 3558626c3920SAl Viro break; 3559626c3920SAl Viro case Opt_mpol: 3560626c3920SAl Viro if (IS_ENABLED(CONFIG_NUMA)) { 3561e04dc423SAl Viro mpol_put(ctx->mpol); 3562e04dc423SAl Viro ctx->mpol = NULL; 3563626c3920SAl Viro if (mpol_parse_str(param->string, &ctx->mpol)) 3564626c3920SAl Viro goto bad_value; 3565626c3920SAl Viro break; 3566626c3920SAl Viro } 3567626c3920SAl Viro goto unsupported_parameter; 3568ea3271f7SChris Down case Opt_inode32: 3569ea3271f7SChris Down ctx->full_inums = false; 3570ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3571ea3271f7SChris Down break; 3572ea3271f7SChris Down case Opt_inode64: 3573ea3271f7SChris Down if (sizeof(ino_t) < 8) { 3574ea3271f7SChris Down return invalfc(fc, 3575ea3271f7SChris Down "Cannot use inode64 with <64bit inums in kernel\n"); 3576ea3271f7SChris Down } 3577ea3271f7SChris Down ctx->full_inums = true; 3578ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3579ea3271f7SChris Down break; 35802c6efe9cSLuis Chamberlain case Opt_noswap: 35812c6efe9cSLuis Chamberlain ctx->noswap = true; 35822c6efe9cSLuis Chamberlain ctx->seen |= SHMEM_SEEN_NOSWAP; 35832c6efe9cSLuis Chamberlain break; 3584e04dc423SAl Viro } 3585e04dc423SAl Viro return 0; 3586e04dc423SAl Viro 3587626c3920SAl Viro unsupported_parameter: 3588f35aa2bcSAl Viro return invalfc(fc, "Unsupported parameter '%s'", param->key); 3589626c3920SAl Viro bad_value: 3590f35aa2bcSAl Viro return invalfc(fc, "Bad value for '%s'", param->key); 3591e04dc423SAl Viro } 3592e04dc423SAl Viro 3593f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data) 3594e04dc423SAl Viro { 3595f3235626SDavid Howells char *options = data; 3596f3235626SDavid Howells 359733f37c64SAl Viro if (options) { 359833f37c64SAl Viro int err = security_sb_eat_lsm_opts(options, &fc->security); 359933f37c64SAl Viro if (err) 360033f37c64SAl Viro return err; 360133f37c64SAl Viro } 360233f37c64SAl Viro 3603b00dc3adSHugh Dickins while (options != NULL) { 3604626c3920SAl Viro char *this_char = options; 3605b00dc3adSHugh Dickins for (;;) { 3606b00dc3adSHugh Dickins /* 3607b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3608b00dc3adSHugh Dickins * mount options form a comma-separated list, 3609b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
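 *
 * A hypothetical option string illustrates the heuristic below: in
 *
 *	"mpol=bind:0,2,size=100m"
 *
 * the comma after "0" is followed by a digit and so is kept as part of
 * the nodelist, while the comma after "2" is followed by 's' and so
 * terminates the mpol option.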
3610b00dc3adSHugh Dickins */ 3611b00dc3adSHugh Dickins options = strchr(options, ','); 3612b00dc3adSHugh Dickins if (options == NULL) 3613b00dc3adSHugh Dickins break; 3614b00dc3adSHugh Dickins options++; 3615b00dc3adSHugh Dickins if (!isdigit(*options)) { 3616b00dc3adSHugh Dickins options[-1] = '\0'; 3617b00dc3adSHugh Dickins break; 3618b00dc3adSHugh Dickins } 3619b00dc3adSHugh Dickins } 3620626c3920SAl Viro if (*this_char) { 3621626c3920SAl Viro char *value = strchr(this_char, '='); 3622f3235626SDavid Howells size_t len = 0; 3623626c3920SAl Viro int err; 3624626c3920SAl Viro 3625626c3920SAl Viro if (value) { 3626626c3920SAl Viro *value++ = '\0'; 3627f3235626SDavid Howells len = strlen(value); 36281da177e4SLinus Torvalds } 3629f3235626SDavid Howells err = vfs_parse_fs_string(fc, this_char, value, len); 3630f3235626SDavid Howells if (err < 0) 3631f3235626SDavid Howells return err; 36321da177e4SLinus Torvalds } 3633626c3920SAl Viro } 36341da177e4SLinus Torvalds return 0; 36351da177e4SLinus Torvalds } 36361da177e4SLinus Torvalds 3637f3235626SDavid Howells /* 3638f3235626SDavid Howells * Reconfigure a shmem filesystem. 3639f3235626SDavid Howells * 3640f3235626SDavid Howells * Note that we disallow change from limited->unlimited blocks/inodes while any 3641f3235626SDavid Howells * are in use; but we must separately disallow unlimited->limited, because in 3642f3235626SDavid Howells * that case we have no record of how much is already in use. 3643f3235626SDavid Howells */ 3644f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc) 36451da177e4SLinus Torvalds { 3646f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3647f3235626SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 36480edd73b3SHugh Dickins unsigned long inodes; 3649bf11b9a8SSebastian Andrzej Siewior struct mempolicy *mpol = NULL; 3650f3235626SDavid Howells const char *err; 36510edd73b3SHugh Dickins 3652bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); 36530edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 36540c98c8e1SZhaoLong Wang 3655f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 3656f3235626SDavid Howells if (!sbinfo->max_blocks) { 3657f3235626SDavid Howells err = "Cannot retroactively limit size"; 36580edd73b3SHugh Dickins goto out; 36590b5071ddSAl Viro } 3660f3235626SDavid Howells if (percpu_counter_compare(&sbinfo->used_blocks, 3661f3235626SDavid Howells ctx->blocks) > 0) { 3662f3235626SDavid Howells err = "Too small a size for current use"; 36630b5071ddSAl Viro goto out; 3664f3235626SDavid Howells } 3665f3235626SDavid Howells } 3666f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 3667f3235626SDavid Howells if (!sbinfo->max_inodes) { 3668f3235626SDavid Howells err = "Cannot retroactively limit inodes"; 36690b5071ddSAl Viro goto out; 36700b5071ddSAl Viro } 3671f3235626SDavid Howells if (ctx->inodes < inodes) { 3672f3235626SDavid Howells err = "Too few inodes for current use"; 3673f3235626SDavid Howells goto out; 3674f3235626SDavid Howells } 3675f3235626SDavid Howells } 36760edd73b3SHugh Dickins 3677ea3271f7SChris Down if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 3678ea3271f7SChris Down sbinfo->next_ino > UINT_MAX) { 3679ea3271f7SChris Down err = "Current inum too high to switch to 32-bit inums"; 3680ea3271f7SChris Down goto out; 3681ea3271f7SChris Down } 36822c6efe9cSLuis Chamberlain if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { 
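		/*
		 * Presumably pages may already have been swapped out
		 * since the first mount, so noswap cannot be honoured
		 * retroactively (an assumption: the rationale is not
		 * spelled out in this file).
		 */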
36832c6efe9cSLuis Chamberlain err = "Cannot disable swap on remount"; 36842c6efe9cSLuis Chamberlain goto out; 36852c6efe9cSLuis Chamberlain } 36862c6efe9cSLuis Chamberlain if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { 36872c6efe9cSLuis Chamberlain err = "Cannot enable swap on remount if it was disabled on first mount"; 36882c6efe9cSLuis Chamberlain goto out; 36892c6efe9cSLuis Chamberlain } 3690ea3271f7SChris Down 3691f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_HUGE) 3692f3235626SDavid Howells sbinfo->huge = ctx->huge; 3693ea3271f7SChris Down if (ctx->seen & SHMEM_SEEN_INUMS) 3694ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums; 3695f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_BLOCKS) 3696f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3697f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_INODES) { 3698f3235626SDavid Howells sbinfo->max_inodes = ctx->inodes; 3699f3235626SDavid Howells sbinfo->free_inodes = ctx->inodes - inodes; 37000b5071ddSAl Viro } 370171fe804bSLee Schermerhorn 37025f00110fSGreg Thelen /* 37035f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 37045f00110fSGreg Thelen */ 3705f3235626SDavid Howells if (ctx->mpol) { 3706bf11b9a8SSebastian Andrzej Siewior mpol = sbinfo->mpol; 3707f3235626SDavid Howells sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 3708f3235626SDavid Howells ctx->mpol = NULL; 37095f00110fSGreg Thelen } 37102c6efe9cSLuis Chamberlain 37112c6efe9cSLuis Chamberlain if (ctx->noswap) 37122c6efe9cSLuis Chamberlain sbinfo->noswap = true; 37132c6efe9cSLuis Chamberlain 3714bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 3715bf11b9a8SSebastian Andrzej Siewior mpol_put(mpol); 3716f3235626SDavid Howells return 0; 37170edd73b3SHugh Dickins out: 3718bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 3719f35aa2bcSAl Viro return invalfc(fc, "%s", err); 37201da177e4SLinus Torvalds } 3721680d794bSakpm@linux-foundation.org 372234c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3723680d794bSakpm@linux-foundation.org { 372434c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3725680d794bSakpm@linux-foundation.org 3726680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 3727680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 372809cbfeafSKirill A. Shutemov sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3729680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 3730680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 37310825a6f9SJoe Perches if (sbinfo->mode != (0777 | S_ISVTX)) 373209208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 37338751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 37348751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 37358751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 37368751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 37378751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 37388751e039SEric W. 
Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 3739ea3271f7SChris Down 3740ea3271f7SChris Down /* 3741ea3271f7SChris Down * Showing inode{64,32} might be useful even if it's the system default, 3742ea3271f7SChris Down * since then people don't have to resort to checking both here and 3743ea3271f7SChris Down * /proc/config.gz to confirm 64-bit inums were successfully applied 3744ea3271f7SChris Down * (which may not even exist if IKCONFIG_PROC isn't enabled). 3745ea3271f7SChris Down * 3746ea3271f7SChris Down * We hide it when inode64 isn't the default and we are using 32-bit 3747ea3271f7SChris Down * inodes, since that probably just means the feature isn't even under 3748ea3271f7SChris Down * consideration. 3749ea3271f7SChris Down * 3750ea3271f7SChris Down * As such: 3751ea3271f7SChris Down * 3752ea3271f7SChris Down * +-----------------+-----------------+ 3753ea3271f7SChris Down * | TMPFS_INODE64=y | TMPFS_INODE64=n | 3754ea3271f7SChris Down * +------------------+-----------------+-----------------+ 3755ea3271f7SChris Down * | full_inums=true | show | show | 3756ea3271f7SChris Down * | full_inums=false | show | hide | 3757ea3271f7SChris Down * +------------------+-----------------+-----------------+ 3758ea3271f7SChris Down * 3759ea3271f7SChris Down */ 3760ea3271f7SChris Down if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) 3761ea3271f7SChris Down seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); 3762396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 37635a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 37645a6e75f8SKirill A. Shutemov if (sbinfo->huge) 37655a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 37665a6e75f8SKirill A. 
Shutemov #endif 376771fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol); 37682c6efe9cSLuis Chamberlain if (sbinfo->noswap) 37692c6efe9cSLuis Chamberlain seq_printf(seq, ",noswap"); 3770680d794bSakpm@linux-foundation.org return 0; 3771680d794bSakpm@linux-foundation.org } 37729183df25SDavid Herrmann 3773680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 37741da177e4SLinus Torvalds 37751da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 37761da177e4SLinus Torvalds { 3777602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3778602586a8SHugh Dickins 3779e809d5f0SChris Down free_percpu(sbinfo->ino_batch); 3780602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 378149cd0a5cSGreg Thelen mpol_put(sbinfo->mpol); 3782602586a8SHugh Dickins kfree(sbinfo); 37831da177e4SLinus Torvalds sb->s_fs_info = NULL; 37841da177e4SLinus Torvalds } 37851da177e4SLinus Torvalds 3786f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 37871da177e4SLinus Torvalds { 3788f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 37891da177e4SLinus Torvalds struct inode *inode; 37900edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 3791680d794bSakpm@linux-foundation.org 3792680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 3793425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3794680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 3795680d794bSakpm@linux-foundation.org if (!sbinfo) 3796680d794bSakpm@linux-foundation.org return -ENOMEM; 3797680d794bSakpm@linux-foundation.org 3798680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 37991da177e4SLinus Torvalds 38000edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 38011da177e4SLinus Torvalds /* 38021da177e4SLinus Torvalds * By default we only allow half of the physical RAM per 38031da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 38041da177e4SLinus Torvalds * but the internal instance is left unlimited. 38051da177e4SLinus Torvalds */ 38061751e8a6SLinus Torvalds if (!(sb->s_flags & SB_KERNMOUNT)) { 3807f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 3808f3235626SDavid Howells ctx->blocks = shmem_default_max_blocks(); 3809f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_INODES)) 3810f3235626SDavid Howells ctx->inodes = shmem_default_max_inodes(); 3811ea3271f7SChris Down if (!(ctx->seen & SHMEM_SEEN_INUMS)) 3812ea3271f7SChris Down ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 38132c6efe9cSLuis Chamberlain sbinfo->noswap = ctx->noswap; 3814ca4e0519SAl Viro } else { 38151751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 38161da177e4SLinus Torvalds } 381791828a40SDavid M.
Grimes sb->s_export_op = &shmem_export_ops; 381836f05cabSJeff Layton sb->s_flags |= SB_NOSEC | SB_I_VERSION; 38190edd73b3SHugh Dickins #else 38201751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 38210edd73b3SHugh Dickins #endif 3822f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3823f3235626SDavid Howells sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 3824e809d5f0SChris Down if (sb->s_flags & SB_KERNMOUNT) { 3825e809d5f0SChris Down sbinfo->ino_batch = alloc_percpu(ino_t); 3826e809d5f0SChris Down if (!sbinfo->ino_batch) 3827e809d5f0SChris Down goto failed; 3828e809d5f0SChris Down } 3829f3235626SDavid Howells sbinfo->uid = ctx->uid; 3830f3235626SDavid Howells sbinfo->gid = ctx->gid; 3831ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums; 3832f3235626SDavid Howells sbinfo->mode = ctx->mode; 3833f3235626SDavid Howells sbinfo->huge = ctx->huge; 3834f3235626SDavid Howells sbinfo->mpol = ctx->mpol; 3835f3235626SDavid Howells ctx->mpol = NULL; 38361da177e4SLinus Torvalds 3837bf11b9a8SSebastian Andrzej Siewior raw_spin_lock_init(&sbinfo->stat_lock); 3838908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3839602586a8SHugh Dickins goto failed; 3840779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 3841779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 38421da177e4SLinus Torvalds 3843285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 384409cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 384509cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 38461da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 38471da177e4SLinus Torvalds sb->s_op = &shmem_ops; 3848cfd95a9cSRobin H. Johnson sb->s_time_gran = 1; 3849b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 385039f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 3851b09e0fa4SEric Paris #endif 3852b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 38531751e8a6SLinus Torvalds sb->s_flags |= SB_POSIXACL; 385439f0247dSAndreas Gruenbacher #endif 38552b4db796SAmir Goldstein uuid_gen(&sb->s_uuid); 38560edd73b3SHugh Dickins 38577a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, 38587a80e5b8SGiuseppe Scrivano VM_NORESERVE); 38591da177e4SLinus Torvalds if (!inode) 38601da177e4SLinus Torvalds goto failed; 3861680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 3862680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 3863318ceed0SAl Viro sb->s_root = d_make_root(inode); 3864318ceed0SAl Viro if (!sb->s_root) 386548fde701SAl Viro goto failed; 38661da177e4SLinus Torvalds return 0; 38671da177e4SLinus Torvalds 38681da177e4SLinus Torvalds failed: 38691da177e4SLinus Torvalds shmem_put_super(sb); 3870f2b346e4SMiaohe Lin return -ENOMEM; 38711da177e4SLinus Torvalds } 38721da177e4SLinus Torvalds 3873f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc) 3874f3235626SDavid Howells { 3875f3235626SDavid Howells return get_tree_nodev(fc, shmem_fill_super); 3876f3235626SDavid Howells } 3877f3235626SDavid Howells 3878f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc) 3879f3235626SDavid Howells { 3880f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3881f3235626SDavid Howells 3882f3235626SDavid Howells if (ctx) { 3883f3235626SDavid Howells mpol_put(ctx->mpol); 3884f3235626SDavid Howells kfree(ctx); 3885f3235626SDavid Howells } 3886f3235626SDavid Howells } 3887f3235626SDavid Howells 3888f3235626SDavid Howells static const struct 
fs_context_operations shmem_fs_context_ops = { 3889f3235626SDavid Howells .free = shmem_free_fc, 3890f3235626SDavid Howells .get_tree = shmem_get_tree, 3891f3235626SDavid Howells #ifdef CONFIG_TMPFS 3892f3235626SDavid Howells .parse_monolithic = shmem_parse_options, 3893f3235626SDavid Howells .parse_param = shmem_parse_one, 3894f3235626SDavid Howells .reconfigure = shmem_reconfigure, 3895f3235626SDavid Howells #endif 3896f3235626SDavid Howells }; 3897f3235626SDavid Howells 3898fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 38991da177e4SLinus Torvalds 39001da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 39011da177e4SLinus Torvalds { 390241ffe5d5SHugh Dickins struct shmem_inode_info *info; 3903fd60b288SMuchun Song info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL); 390441ffe5d5SHugh Dickins if (!info) 39051da177e4SLinus Torvalds return NULL; 390641ffe5d5SHugh Dickins return &info->vfs_inode; 39071da177e4SLinus Torvalds } 39081da177e4SLinus Torvalds 390974b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode) 3910fa0d7e3dSNick Piggin { 391184e710daSAl Viro if (S_ISLNK(inode->i_mode)) 39123ed47db3SAl Viro kfree(inode->i_link); 3913fa0d7e3dSNick Piggin kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3914fa0d7e3dSNick Piggin } 3915fa0d7e3dSNick Piggin 39161da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 39171da177e4SLinus Torvalds { 391809208d15SAl Viro if (S_ISREG(inode->i_mode)) 39191da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 39201da177e4SLinus Torvalds } 39211da177e4SLinus Torvalds 392241ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 39231da177e4SLinus Torvalds { 392441ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 392541ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 39261da177e4SLinus Torvalds } 39271da177e4SLinus Torvalds 39289a8ec03eSweiping zhang static void shmem_init_inodecache(void) 39291da177e4SLinus Torvalds { 39301da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 39311da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 39325d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 39331da177e4SLinus Torvalds } 39341da177e4SLinus Torvalds 393541ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 39361da177e4SLinus Torvalds { 39371a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 39381da177e4SLinus Torvalds } 39391da177e4SLinus Torvalds 3940a7605426SYang Shi /* Keep the page in page cache instead of truncating it */ 3941a7605426SYang Shi static int shmem_error_remove_page(struct address_space *mapping, 3942a7605426SYang Shi struct page *page) 3943a7605426SYang Shi { 3944a7605426SYang Shi return 0; 3945a7605426SYang Shi } 3946a7605426SYang Shi 394730e6a51dSHui Su const struct address_space_operations shmem_aops = { 39481da177e4SLinus Torvalds .writepage = shmem_writepage, 394946de8b97SMatthew Wilcox (Oracle) .dirty_folio = noop_dirty_folio, 39501da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3951800d15a5SNick Piggin .write_begin = shmem_write_begin, 3952800d15a5SNick Piggin .write_end = shmem_write_end, 39531da177e4SLinus Torvalds #endif 39541c93923cSAndrew Morton #ifdef CONFIG_MIGRATION 395554184650SMatthew Wilcox (Oracle) .migrate_folio = migrate_folio, 39561c93923cSAndrew Morton #endif 3957a7605426SYang Shi .error_remove_page = shmem_error_remove_page, 39581da177e4SLinus Torvalds }; 395930e6a51dSHui Su EXPORT_SYMBOL(shmem_aops); 
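/*
 * Minimal sketch (assumed kernel-side caller, not part of this file) of
 * how the exported shmem_aops surfaces: a file from shmem_file_setup()
 * below has its mapping driven by these ops.
 *
 *	struct file *f = shmem_file_setup("example", 1024 * 1024, 0);
 *
 *	if (!IS_ERR(f)) {
 *		struct address_space *m = file_inode(f)->i_mapping;
 *
 *		WARN_ON(m->a_ops != &shmem_aops);
 *		fput(f);
 *	}
 */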
39601da177e4SLinus Torvalds 396115ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 39621da177e4SLinus Torvalds .mmap = shmem_mmap, 3963a5454f95SThomas Weißschuh .open = generic_file_open, 3964c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area, 39651da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3966220f2ac9SHugh Dickins .llseek = shmem_file_llseek, 39672ba5bbedSAl Viro .read_iter = shmem_file_read_iter, 39688174202bSAl Viro .write_iter = generic_file_write_iter, 39691b061d92SChristoph Hellwig .fsync = noop_fsync, 397082c156f8SAl Viro .splice_read = generic_file_splice_read, 3971f6cb85d0SAl Viro .splice_write = iter_file_splice_write, 397283e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 39731da177e4SLinus Torvalds #endif 39741da177e4SLinus Torvalds }; 39751da177e4SLinus Torvalds 397692e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 397744a30220SYu Zhao .getattr = shmem_getattr, 397894c1e62dSHugh Dickins .setattr = shmem_setattr, 3979b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3980b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3981feda821eSChristoph Hellwig .set_acl = simple_set_acl, 3982e408e695STheodore Ts'o .fileattr_get = shmem_fileattr_get, 3983e408e695STheodore Ts'o .fileattr_set = shmem_fileattr_set, 3984b09e0fa4SEric Paris #endif 39851da177e4SLinus Torvalds }; 39861da177e4SLinus Torvalds 398792e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 39881da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3989f7cd16a5SXavier Roche .getattr = shmem_getattr, 39901da177e4SLinus Torvalds .create = shmem_create, 39911da177e4SLinus Torvalds .lookup = simple_lookup, 39921da177e4SLinus Torvalds .link = shmem_link, 39931da177e4SLinus Torvalds .unlink = shmem_unlink, 39941da177e4SLinus Torvalds .symlink = shmem_symlink, 39951da177e4SLinus Torvalds .mkdir = shmem_mkdir, 39961da177e4SLinus Torvalds .rmdir = shmem_rmdir, 39971da177e4SLinus Torvalds .mknod = shmem_mknod, 39982773bf00SMiklos Szeredi .rename = shmem_rename2, 399960545d0dSAl Viro .tmpfile = shmem_tmpfile, 40001da177e4SLinus Torvalds #endif 4001b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 4002b09e0fa4SEric Paris .listxattr = shmem_listxattr, 4003e408e695STheodore Ts'o .fileattr_get = shmem_fileattr_get, 4004e408e695STheodore Ts'o .fileattr_set = shmem_fileattr_set, 4005b09e0fa4SEric Paris #endif 400639f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 400794c1e62dSHugh Dickins .setattr = shmem_setattr, 4008feda821eSChristoph Hellwig .set_acl = simple_set_acl, 400939f0247dSAndreas Gruenbacher #endif 401039f0247dSAndreas Gruenbacher }; 401139f0247dSAndreas Gruenbacher 401292e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 4013f7cd16a5SXavier Roche .getattr = shmem_getattr, 4014b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 4015b09e0fa4SEric Paris .listxattr = shmem_listxattr, 4016b09e0fa4SEric Paris #endif 401739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 401894c1e62dSHugh Dickins .setattr = shmem_setattr, 4019feda821eSChristoph Hellwig .set_acl = simple_set_acl, 402039f0247dSAndreas Gruenbacher #endif 40211da177e4SLinus Torvalds }; 40221da177e4SLinus Torvalds 4023759b9775SHugh Dickins static const struct super_operations shmem_ops = { 40241da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 402574b1da56SAl Viro .free_inode = shmem_free_in_core_inode, 40261da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 40271da177e4SLinus Torvalds #ifdef 
CONFIG_TMPFS 40281da177e4SLinus Torvalds .statfs = shmem_statfs, 4029680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 40301da177e4SLinus Torvalds #endif 40311f895f75SAl Viro .evict_inode = shmem_evict_inode, 40321da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 40331da177e4SLinus Torvalds .put_super = shmem_put_super, 4034396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4035779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count, 4036779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan, 4037779750d2SKirill A. Shutemov #endif 40381da177e4SLinus Torvalds }; 40391da177e4SLinus Torvalds 4040f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 404154cb8821SNick Piggin .fault = shmem_fault, 4042d7c17551SNing Qu .map_pages = filemap_map_pages, 40431da177e4SLinus Torvalds #ifdef CONFIG_NUMA 40441da177e4SLinus Torvalds .set_policy = shmem_set_policy, 40451da177e4SLinus Torvalds .get_policy = shmem_get_policy, 40461da177e4SLinus Torvalds #endif 40471da177e4SLinus Torvalds }; 40481da177e4SLinus Torvalds 4049d09e8ca6SPasha Tatashin static const struct vm_operations_struct shmem_anon_vm_ops = { 4050d09e8ca6SPasha Tatashin .fault = shmem_fault, 4051d09e8ca6SPasha Tatashin .map_pages = filemap_map_pages, 4052d09e8ca6SPasha Tatashin #ifdef CONFIG_NUMA 4053d09e8ca6SPasha Tatashin .set_policy = shmem_set_policy, 4054d09e8ca6SPasha Tatashin .get_policy = shmem_get_policy, 4055d09e8ca6SPasha Tatashin #endif 4056d09e8ca6SPasha Tatashin }; 4057d09e8ca6SPasha Tatashin 4058f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc) 40591da177e4SLinus Torvalds { 4060f3235626SDavid Howells struct shmem_options *ctx; 4061f3235626SDavid Howells 4062f3235626SDavid Howells ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 4063f3235626SDavid Howells if (!ctx) 4064f3235626SDavid Howells return -ENOMEM; 4065f3235626SDavid Howells 4066f3235626SDavid Howells ctx->mode = 0777 | S_ISVTX; 4067f3235626SDavid Howells ctx->uid = current_fsuid(); 4068f3235626SDavid Howells ctx->gid = current_fsgid(); 4069f3235626SDavid Howells 4070f3235626SDavid Howells fc->fs_private = ctx; 4071f3235626SDavid Howells fc->ops = &shmem_fs_context_ops; 4072f3235626SDavid Howells return 0; 40731da177e4SLinus Torvalds } 40741da177e4SLinus Torvalds 407541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 40761da177e4SLinus Torvalds .owner = THIS_MODULE, 40771da177e4SLinus Torvalds .name = "tmpfs", 4078f3235626SDavid Howells .init_fs_context = shmem_init_fs_context, 4079f3235626SDavid Howells #ifdef CONFIG_TMPFS 4080d7167b14SAl Viro .parameters = shmem_fs_parameters, 4081f3235626SDavid Howells #endif 40821da177e4SLinus Torvalds .kill_sb = kill_litter_super, 40837a80e5b8SGiuseppe Scrivano #ifdef CONFIG_SHMEM 40847a80e5b8SGiuseppe Scrivano .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP, 40857a80e5b8SGiuseppe Scrivano #else 4086ff36da69SMatthew Wilcox (Oracle) .fs_flags = FS_USERNS_MOUNT, 40877a80e5b8SGiuseppe Scrivano #endif 40881da177e4SLinus Torvalds }; 40891da177e4SLinus Torvalds 40909096bbe9SMiaohe Lin void __init shmem_init(void) 40911da177e4SLinus Torvalds { 40921da177e4SLinus Torvalds int error; 40931da177e4SLinus Torvalds 40949a8ec03eSweiping zhang shmem_init_inodecache(); 40951da177e4SLinus Torvalds 409641ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 40971da177e4SLinus Torvalds if (error) { 40981170532bSJoe Perches pr_err("Could not register tmpfs\n"); 40991da177e4SLinus 
Torvalds goto out2; 41001da177e4SLinus Torvalds } 410195dc112aSGreg Kroah-Hartman 4102ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 41031da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 41041da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 41051170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 41061da177e4SLinus Torvalds goto out1; 41071da177e4SLinus Torvalds } 41085a6e75f8SKirill A. Shutemov 4109396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4110435c0b87SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 41115a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 41125a6e75f8SKirill A. Shutemov else 41135e6e5a12SHugh Dickins shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ 41145a6e75f8SKirill A. Shutemov #endif 41159096bbe9SMiaohe Lin return; 41161da177e4SLinus Torvalds 41171da177e4SLinus Torvalds out1: 411841ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 41191da177e4SLinus Torvalds out2: 412041ffe5d5SHugh Dickins shmem_destroy_inodecache(); 41211da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 41221da177e4SLinus Torvalds } 4123853ac43aSMatt Mackall 4124396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 41255a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj, 41265a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 41275a6e75f8SKirill A. Shutemov { 412826083eb6SColin Ian King static const int values[] = { 41295a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS, 41305a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE, 41315a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE, 41325a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER, 41335a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY, 41345a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE, 41355a6e75f8SKirill A. Shutemov }; 413679d4d38aSJoe Perches int len = 0; 413779d4d38aSJoe Perches int i; 41385a6e75f8SKirill A. Shutemov 413979d4d38aSJoe Perches for (i = 0; i < ARRAY_SIZE(values); i++) { 414079d4d38aSJoe Perches len += sysfs_emit_at(buf, len, 414179d4d38aSJoe Perches shmem_huge == values[i] ? "%s[%s]" : "%s%s", 414279d4d38aSJoe Perches i ? " " : "", 41435a6e75f8SKirill A. Shutemov shmem_format_huge(values[i])); 41445a6e75f8SKirill A. Shutemov } 414579d4d38aSJoe Perches 414679d4d38aSJoe Perches len += sysfs_emit_at(buf, len, "\n"); 414779d4d38aSJoe Perches 414879d4d38aSJoe Perches return len; 41495a6e75f8SKirill A. Shutemov } 41505a6e75f8SKirill A. Shutemov 41515a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj, 41525a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 41535a6e75f8SKirill A. Shutemov { 41545a6e75f8SKirill A. Shutemov char tmp[16]; 41555a6e75f8SKirill A. Shutemov int huge; 41565a6e75f8SKirill A. Shutemov 41575a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp)) 41585a6e75f8SKirill A. Shutemov return -EINVAL; 41595a6e75f8SKirill A. Shutemov memcpy(tmp, buf, count); 41605a6e75f8SKirill A. Shutemov tmp[count] = '\0'; 41615a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n') 41625a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0'; 41635a6e75f8SKirill A. Shutemov 41645a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp); 41655a6e75f8SKirill A. Shutemov if (huge == -EINVAL) 41665a6e75f8SKirill A. Shutemov return -EINVAL; 41675a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 41685a6e75f8SKirill A. 
Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 41695a6e75f8SKirill A. Shutemov return -EINVAL; 41705a6e75f8SKirill A. Shutemov 41715a6e75f8SKirill A. Shutemov shmem_huge = huge; 4172435c0b87SKirill A. Shutemov if (shmem_huge > SHMEM_HUGE_DENY) 41735a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 41745a6e75f8SKirill A. Shutemov return count; 41755a6e75f8SKirill A. Shutemov } 41765a6e75f8SKirill A. Shutemov 41774bfa8adaSMiaohe Lin struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); 4178396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ 4179f3f0e1d2SKirill A. Shutemov 4180853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */ 4181853ac43aSMatt Mackall 4182853ac43aSMatt Mackall /* 4183853ac43aSMatt Mackall * tiny-shmem: simple shmemfs and tmpfs using ramfs code 4184853ac43aSMatt Mackall * 4185853ac43aSMatt Mackall * This is intended for small systems where the benefits of the full 4186853ac43aSMatt Mackall * shmem code (swap-backed and resource-limited) are outweighed by 4187853ac43aSMatt Mackall * its complexity. On systems without swap this code should be 4188853ac43aSMatt Mackall * effectively equivalent, but much lighter weight. 4189853ac43aSMatt Mackall */ 4190853ac43aSMatt Mackall 419141ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 4192853ac43aSMatt Mackall .name = "tmpfs", 4193f3235626SDavid Howells .init_fs_context = ramfs_init_fs_context, 4194d7167b14SAl Viro .parameters = ramfs_fs_parameters, 4195853ac43aSMatt Mackall .kill_sb = kill_litter_super, 41962b8576cbSEric W. Biederman .fs_flags = FS_USERNS_MOUNT, 4197853ac43aSMatt Mackall }; 4198853ac43aSMatt Mackall 41999096bbe9SMiaohe Lin void __init shmem_init(void) 4200853ac43aSMatt Mackall { 420141ffe5d5SHugh Dickins BUG_ON(register_filesystem(&shmem_fs_type) != 0); 4202853ac43aSMatt Mackall 420341ffe5d5SHugh Dickins shm_mnt = kern_mount(&shmem_fs_type); 4204853ac43aSMatt Mackall BUG_ON(IS_ERR(shm_mnt)); 4205853ac43aSMatt Mackall } 4206853ac43aSMatt Mackall 420710a9c496SChristoph Hellwig int shmem_unuse(unsigned int type) 4208853ac43aSMatt Mackall { 4209853ac43aSMatt Mackall return 0; 4210853ac43aSMatt Mackall } 4211853ac43aSMatt Mackall 4212d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 42133f96b79aSHugh Dickins { 42143f96b79aSHugh Dickins return 0; 42153f96b79aSHugh Dickins } 42163f96b79aSHugh Dickins 421724513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping) 421824513264SHugh Dickins { 421924513264SHugh Dickins } 422024513264SHugh Dickins 4221c01d5b30SHugh Dickins #ifdef CONFIG_MMU 4222c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 4223c01d5b30SHugh Dickins unsigned long addr, unsigned long len, 4224c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 4225c01d5b30SHugh Dickins { 4226c01d5b30SHugh Dickins return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); 4227c01d5b30SHugh Dickins } 4228c01d5b30SHugh Dickins #endif 4229c01d5b30SHugh Dickins 423041ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 423194c1e62dSHugh Dickins { 423241ffe5d5SHugh Dickins truncate_inode_pages_range(inode->i_mapping, lstart, lend); 423394c1e62dSHugh Dickins } 423494c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range); 423594c1e62dSHugh Dickins 4236853ac43aSMatt Mackall #define shmem_vm_ops generic_file_vm_ops 4237d09e8ca6SPasha Tatashin #define shmem_anon_vm_ops
generic_file_vm_ops 42380b0a0806SHugh Dickins #define shmem_file_operations ramfs_file_operations 42397a80e5b8SGiuseppe Scrivano #define shmem_get_inode(idmap, sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 42400b0a0806SHugh Dickins #define shmem_acct_size(flags, size) 0 42410b0a0806SHugh Dickins #define shmem_unacct_size(flags, size) do {} while (0) 4242853ac43aSMatt Mackall 4243853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */ 4244853ac43aSMatt Mackall 4245853ac43aSMatt Mackall /* common code */ 42461da177e4SLinus Torvalds 4247703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, 4248c7277090SEric Paris unsigned long flags, unsigned int i_flags) 42491da177e4SLinus Torvalds { 42501da177e4SLinus Torvalds struct inode *inode; 425193dec2daSAl Viro struct file *res; 42521da177e4SLinus Torvalds 4253703321b6SMatthew Auld if (IS_ERR(mnt)) 4254703321b6SMatthew Auld return ERR_CAST(mnt); 42551da177e4SLinus Torvalds 4256285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE) 42571da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 42581da177e4SLinus Torvalds 42591da177e4SLinus Torvalds if (shmem_acct_size(flags, size)) 42601da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 42611da177e4SLinus Torvalds 42627a80e5b8SGiuseppe Scrivano if (is_idmapped_mnt(mnt)) 42637a80e5b8SGiuseppe Scrivano return ERR_PTR(-EINVAL); 42647a80e5b8SGiuseppe Scrivano 42657a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, 42667a80e5b8SGiuseppe Scrivano S_IFREG | S_IRWXUGO, 0, flags); 4267dac2d1f6SAl Viro if (unlikely(!inode)) { 4268dac2d1f6SAl Viro shmem_unacct_size(flags, size); 4269dac2d1f6SAl Viro return ERR_PTR(-ENOSPC); 4270dac2d1f6SAl Viro } 4271c7277090SEric Paris inode->i_flags |= i_flags; 42721da177e4SLinus Torvalds inode->i_size = size; 42736d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */ 427426567cdbSAl Viro res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 427593dec2daSAl Viro if (!IS_ERR(res)) 427693dec2daSAl Viro res = alloc_file_pseudo(inode, mnt, name, O_RDWR, 42774b42af81SAl Viro &shmem_file_operations); 42786b4d0b27SAl Viro if (IS_ERR(res)) 427993dec2daSAl Viro iput(inode); 42806b4d0b27SAl Viro return res; 42811da177e4SLinus Torvalds } 4282c7277090SEric Paris 4283c7277090SEric Paris /** 4284c7277090SEric Paris * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 4285c7277090SEric Paris * kernel internal. There will be NO LSM permission checks against the 4286c7277090SEric Paris * underlying inode. So users of this interface must do LSM checks at a 4287e1832f29SStephen Smalley * higher layer. The users are the big_key and shm implementations. LSM 4288e1832f29SStephen Smalley * checks are provided at the key or shm level rather than the inode. 
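 *
 * A minimal usage sketch, mirroring shmem_zero_setup() further below
 * (name and flags are only illustrative):
 *
 *	file = shmem_kernel_file_setup("dev/zero", size, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *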
4289c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps) 4290c7277090SEric Paris * @size: size to be set for the file 4291c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 4292c7277090SEric Paris */ 4293c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 4294c7277090SEric Paris { 4295703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE); 4296c7277090SEric Paris } 4297c7277090SEric Paris 4298c7277090SEric Paris /** 4299c7277090SEric Paris * shmem_file_setup - get an unlinked file living in tmpfs 4300c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps) 4301c7277090SEric Paris * @size: size to be set for the file 4302c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 4303c7277090SEric Paris */ 4304c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 4305c7277090SEric Paris { 4306703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, 0); 4307c7277090SEric Paris } 4308395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup); 43091da177e4SLinus Torvalds 431046711810SRandy Dunlap /** 4311703321b6SMatthew Auld * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs 4312703321b6SMatthew Auld * @mnt: the tmpfs mount where the file will be created 4313703321b6SMatthew Auld * @name: name for dentry (to be seen in /proc/<pid>/maps) 4314703321b6SMatthew Auld * @size: size to be set for the file 4315703321b6SMatthew Auld * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 4316703321b6SMatthew Auld */ 4317703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, 4318703321b6SMatthew Auld loff_t size, unsigned long flags) 4319703321b6SMatthew Auld { 4320703321b6SMatthew Auld return __shmem_file_setup(mnt, name, size, flags, 0); 4321703321b6SMatthew Auld } 4322703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); 4323703321b6SMatthew Auld 4324703321b6SMatthew Auld /** 43251da177e4SLinus Torvalds * shmem_zero_setup - setup a shared anonymous mapping 432645e55300SPeter Collingbourne * @vma: the vma to be mmapped, as prepared by do_mmap 43271da177e4SLinus Torvalds */ 43281da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma) 43291da177e4SLinus Torvalds { 43301da177e4SLinus Torvalds struct file *file; 43311da177e4SLinus Torvalds loff_t size = vma->vm_end - vma->vm_start; 43321da177e4SLinus Torvalds 433366fc1303SHugh Dickins /* 4334c1e8d7c6SMichel Lespinasse * Cloning a new file under mmap_lock leads to a lock ordering conflict 433566fc1303SHugh Dickins * between XFS directory reading and selinux: since this file is only 433666fc1303SHugh Dickins * accessible to the user through its mapping, use S_PRIVATE flag to 433766fc1303SHugh Dickins * bypass file security, in the same way as shmem_kernel_file_setup(). 433866fc1303SHugh Dickins */ 4339703321b6SMatthew Auld file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); 43401da177e4SLinus Torvalds if (IS_ERR(file)) 43411da177e4SLinus Torvalds return PTR_ERR(file); 43421da177e4SLinus Torvalds 43431da177e4SLinus Torvalds if (vma->vm_file) 43441da177e4SLinus Torvalds fput(vma->vm_file); 43451da177e4SLinus Torvalds vma->vm_file = file; 4346d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops; 4347f3f0e1d2SKirill A.
/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the folio's address_space
 * @index: the folio index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method, which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
				   pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct folio *folio;
	int error;

	BUG_ON(!shmem_mapping(mapping));
	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
				    gfp, NULL, NULL, NULL);
	if (error)
		return ERR_PTR(error);

	folio_unlock(folio);
	return folio;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
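/*
 * Illustration only -- an editor's sketch of the gfp mixing described in
 * the comment above: a caller that would rather fail an allocation than
 * trigger the OOM killer ORs __GFP_NORETRY | __GFP_NOWARN into the
 * mapping's default mask.  example_read_folio_noretry() is an invented name.
 */
static struct folio *example_read_folio_noretry(struct address_space *mapping,
						pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);

	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	return shmem_read_folio_gfp(mapping, index, gfp);
}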
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
	struct page *page;

	if (IS_ERR(folio))
		return &folio->page;	/* page sits at offset 0 of folio, so the ERR_PTR survives */

	page = folio_file_page(folio, index);
	if (PageHWPoison(page)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
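/*
 * Illustration only -- an editor's sketch of the page-based caller side,
 * as in the i915 and ttm users mentioned above: the return value is a
 * valid page or an ERR_PTR (never NULL), and the reference taken must be
 * dropped with put_page().  example_copy_from_shmem() is an invented name.
 */
static int example_copy_from_shmem(struct address_space *mapping,
				   pgoff_t index, void *dst)
{
	struct page *page;
	void *src;

	page = shmem_read_mapping_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping));
	if (IS_ERR(page))
		return PTR_ERR(page);

	src = kmap_local_page(page);	/* safe on HIGHMEM configs too */
	memcpy(dst, src, PAGE_SIZE);
	kunmap_local(src);
	put_page(page);
	return 0;
}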