/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE	(PAGE_SIZE/512)
#define VM_ACCT(size)	(PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
		    ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			      struct folio **foliop, enum sgp_type sgp,
			      gfp_t gfp, struct vm_area_struct *vma,
			      vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

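/*
 * Charge an allocation of @pages against every applicable limit: the
 * overcommit accounting above, then the per-mount used_blocks counter
 * (when max_blocks is set), then quotas.  Unwound in reverse order by
 * shmem_inode_unacct_blocks().
 */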
static int shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_block(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;

		percpu_counter_add(&sbinfo->used_blocks, pages);
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	might_sleep();	/* when quotas */
	dquot_free_block_nodirty(inode, pages);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

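/*
 * A shmem-backed VMA is identified purely by its vm_ops: shared anonymous
 * mappings use shmem_anon_vm_ops, tmpfs file mappings use shmem_vm_ops.
 */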
bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
			       unsigned short quota_types)
{
	int type, err = 0;

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
		if (!(quota_types & (1 << type)))
			continue;
		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
					  DQUOT_USAGE_ENABLED |
					  DQUOT_LIMITS_ENABLED);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
		type, err);
	for (type--; type >= 0; type--)
		dquot_quota_off(sb, type);
	return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
	int type;

	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
		dquot_quota_off(sb, type);
}

static struct dquot **shmem_get_dquots(struct inode *inode)
{
	return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link, to account for the space
 * needed by each dentry. However, in that case, no new inode number is
 * needed, since hard links internally draw from another pool of inode
 * numbers (currently the global get_next_ino()). This case is indicated by
 * passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_ispace -= BOGO_INODE_SIZE;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
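		 *
		 * Each CPU hands out inos from its own cursor, and refills
		 * from sbinfo->next_ino only when the cursor reaches a
		 * multiple of SHMEM_INO_BATCH, so stat_lock is taken roughly
		 * once per 1024 allocations per CPU.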
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	spin_lock(&info->lock);
	info->alloced += alloced;
	info->swapped += swapped;
	freed = info->alloced - info->swapped -
		READ_ONCE(inode->i_mapping->nrpages);
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
	 * shmem_writepage() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed.  Compensate here, to avoid the need for a followup call.
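	 *
	 * (In that window the new swap entry and the still-present page are
	 * both counted, so freed would come out low by exactly the swapped
	 * delta; adding it back keeps the accounting balanced.)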
	 */
	if (swapped > 0)
		freed += swapped;
	if (freed > 0)
		info->alloced -= freed;
	spin_unlock(&info->lock);

	/* The quota case may block */
	if (freed > 0)
		shmem_inode_unacct_blocks(inode, freed);
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct address_space *mapping = inode->i_mapping;

	if (shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);

	shmem_recalc_inode(inode, pages, 0);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	/* pages argument is currently unused: keep it to help debugging */
	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking the page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

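/*
 * Decide whether a huge page may back @inode at @index, checking in order:
 * regular-file-ness, the caller's per-process THP opt-outs, the global
 * DENY/FORCE overrides, and finally the per-mount huge= policy.
 */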
bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
Shutemov return "bad_val"; 6085a6e75f8SKirill A. Shutemov } 6095a6e75f8SKirill A. Shutemov } 610f1f5929cSJérémy Lefaure #endif 6115a6e75f8SKirill A. Shutemov 612779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 613779750d2SKirill A. Shutemov struct shrink_control *sc, unsigned long nr_to_split) 614779750d2SKirill A. Shutemov { 615779750d2SKirill A. Shutemov LIST_HEAD(list), *pos, *next; 616253fd0f0SKirill A. Shutemov LIST_HEAD(to_remove); 617779750d2SKirill A. Shutemov struct inode *inode; 618779750d2SKirill A. Shutemov struct shmem_inode_info *info; 61905624571SMatthew Wilcox (Oracle) struct folio *folio; 620779750d2SKirill A. Shutemov unsigned long batch = sc ? sc->nr_to_scan : 128; 62162c9827cSGang Li int split = 0; 622779750d2SKirill A. Shutemov 623779750d2SKirill A. Shutemov if (list_empty(&sbinfo->shrinklist)) 624779750d2SKirill A. Shutemov return SHRINK_STOP; 625779750d2SKirill A. Shutemov 626779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 627779750d2SKirill A. Shutemov list_for_each_safe(pos, next, &sbinfo->shrinklist) { 628779750d2SKirill A. Shutemov info = list_entry(pos, struct shmem_inode_info, shrinklist); 629779750d2SKirill A. Shutemov 630779750d2SKirill A. Shutemov /* pin the inode */ 631779750d2SKirill A. Shutemov inode = igrab(&info->vfs_inode); 632779750d2SKirill A. Shutemov 633779750d2SKirill A. Shutemov /* inode is about to be evicted */ 634779750d2SKirill A. Shutemov if (!inode) { 635779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 636779750d2SKirill A. Shutemov goto next; 637779750d2SKirill A. Shutemov } 638779750d2SKirill A. Shutemov 639779750d2SKirill A. Shutemov /* Check if there's anything to gain */ 640779750d2SKirill A. Shutemov if (round_up(inode->i_size, PAGE_SIZE) == 641779750d2SKirill A. Shutemov round_up(inode->i_size, HPAGE_PMD_SIZE)) { 642253fd0f0SKirill A. Shutemov list_move(&info->shrinklist, &to_remove); 643779750d2SKirill A. Shutemov goto next; 644779750d2SKirill A. Shutemov } 645779750d2SKirill A. Shutemov 646779750d2SKirill A. Shutemov list_move(&info->shrinklist, &list); 647779750d2SKirill A. Shutemov next: 64862c9827cSGang Li sbinfo->shrinklist_len--; 649779750d2SKirill A. Shutemov if (!--batch) 650779750d2SKirill A. Shutemov break; 651779750d2SKirill A. Shutemov } 652779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 653779750d2SKirill A. Shutemov 654253fd0f0SKirill A. Shutemov list_for_each_safe(pos, next, &to_remove) { 655253fd0f0SKirill A. Shutemov info = list_entry(pos, struct shmem_inode_info, shrinklist); 656253fd0f0SKirill A. Shutemov inode = &info->vfs_inode; 657253fd0f0SKirill A. Shutemov list_del_init(&info->shrinklist); 658253fd0f0SKirill A. Shutemov iput(inode); 659253fd0f0SKirill A. Shutemov } 660253fd0f0SKirill A. Shutemov 661779750d2SKirill A. Shutemov list_for_each_safe(pos, next, &list) { 662779750d2SKirill A. Shutemov int ret; 66305624571SMatthew Wilcox (Oracle) pgoff_t index; 664779750d2SKirill A. Shutemov 665779750d2SKirill A. Shutemov info = list_entry(pos, struct shmem_inode_info, shrinklist); 666779750d2SKirill A. Shutemov inode = &info->vfs_inode; 667779750d2SKirill A. Shutemov 668b3cd54b2SKirill A. Shutemov if (nr_to_split && split >= nr_to_split) 66962c9827cSGang Li goto move_back; 670779750d2SKirill A. 
		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (IS_ERR(folio))
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * If we fail to lock the folio this time, move the inode back
		 * onto the shrinklist: waiting for the lock here could
		 * deadlock against the reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If the split failed, move the inode back onto the shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
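		/*
		 * xas_find_conflict() resumes from the first hit: with
		 * @expected present, any further entry in the range belongs
		 * to someone else.
		 */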
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;
	unsigned long max = end - 1;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;
		if (xas.xa_index == max)
			break;
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
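 * The mapping is scanned in folio batches, and vmscan moves each folio back
 * to whichever evictable LRU list it now belongs on.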
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated folios as holes.
	 */
	folio = filemap_get_entry(inode->i_mapping, index);
	if (!folio)
		return folio;
	if (!xa_is_value(folio)) {
		folio_lock(folio);
		if (folio->mapping == inode->i_mapping)
			return folio;
		/* The folio has been swapped out */
		folio_unlock(folio);
		folio_put(folio);
	}
	/*
	 * But read a folio back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
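 *
 * Works in three passes: a batched lock-and-truncate sweep over the whole
 * range, then edge handling that zeroes or splits any folio straddling
 * lstart or lend, then a retry loop for whatever the first sweep raced with.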
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
			     bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, indices[i], folio)) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			folio_lock(folio);

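			/* Re-validate under the folio lock before truncating */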
Shutemov 10920e499ed3SMatthew Wilcox (Oracle) if (!unfalloc || !folio_test_uptodate(folio)) { 10930e499ed3SMatthew Wilcox (Oracle) if (folio_mapping(folio) != mapping) { 1094b1a36650SHugh Dickins /* Page was replaced by swap: retry */ 10950e499ed3SMatthew Wilcox (Oracle) folio_unlock(folio); 10969fb6beeaSVishal Moola (Oracle) index = indices[i]; 1097b1a36650SHugh Dickins break; 10987a5d0fbbSHugh Dickins } 10990e499ed3SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_writeback(folio), 11000e499ed3SMatthew Wilcox (Oracle) folio); 1101*7a4ae7acSDavid Stevens 1102*7a4ae7acSDavid Stevens if (!folio_test_large(folio)) { 11030e499ed3SMatthew Wilcox (Oracle) truncate_inode_folio(mapping, folio); 1104*7a4ae7acSDavid Stevens } else if (truncate_inode_partial_folio(folio, lstart, lend)) { 1105*7a4ae7acSDavid Stevens /* 1106*7a4ae7acSDavid Stevens * If we split a page, reset the loop so 1107*7a4ae7acSDavid Stevens * that we pick up the new sub pages. 1108*7a4ae7acSDavid Stevens * Otherwise the THP was entirely 1109*7a4ae7acSDavid Stevens * dropped or the target range was 1110*7a4ae7acSDavid Stevens * zeroed, so just continue the loop as 1111*7a4ae7acSDavid Stevens * is. 1112*7a4ae7acSDavid Stevens */ 1113*7a4ae7acSDavid Stevens if (!folio_test_large(folio)) { 1114*7a4ae7acSDavid Stevens folio_unlock(folio); 1115*7a4ae7acSDavid Stevens index = start; 1116*7a4ae7acSDavid Stevens break; 1117*7a4ae7acSDavid Stevens } 1118*7a4ae7acSDavid Stevens } 111971725ed1SHugh Dickins } 11200e499ed3SMatthew Wilcox (Oracle) folio_unlock(folio); 1121bda97eabSHugh Dickins } 11220e499ed3SMatthew Wilcox (Oracle) folio_batch_remove_exceptionals(&fbatch); 11230e499ed3SMatthew Wilcox (Oracle) folio_batch_release(&fbatch); 1124bda97eabSHugh Dickins } 112594c1e62dSHugh Dickins 11263c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, -nr_swaps_freed); 11271635f6a7SHugh Dickins } 11281da177e4SLinus Torvalds 11291635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 11301635f6a7SHugh Dickins { 11311635f6a7SHugh Dickins shmem_undo_range(inode, lstart, lend, false); 113265287334SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 113336f05cabSJeff Layton inode_inc_iversion(inode); 11341da177e4SLinus Torvalds } 113594c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range); 11361da177e4SLinus Torvalds 1137b74d24f7SChristian Brauner static int shmem_getattr(struct mnt_idmap *idmap, 1138549c7297SChristian Brauner const struct path *path, struct kstat *stat, 1139a528d35eSDavid Howells u32 request_mask, unsigned int query_flags) 114044a30220SYu Zhao { 1141a528d35eSDavid Howells struct inode *inode = path->dentry->d_inode; 114244a30220SYu Zhao struct shmem_inode_info *info = SHMEM_I(inode); 114344a30220SYu Zhao 11443c1b7528SHugh Dickins if (info->alloced - info->swapped != inode->i_mapping->nrpages) 11453c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0); 11463c1b7528SHugh Dickins 1147e408e695STheodore Ts'o if (info->fsflags & FS_APPEND_FL) 1148e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_APPEND; 1149e408e695STheodore Ts'o if (info->fsflags & FS_IMMUTABLE_FL) 1150e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_IMMUTABLE; 1151e408e695STheodore Ts'o if (info->fsflags & FS_NODUMP_FL) 1152e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_NODUMP; 1153e408e695STheodore Ts'o stat->attributes_mask |= (STATX_ATTR_APPEND | 1154e408e695STheodore Ts'o STATX_ATTR_IMMUTABLE | 1155e408e695STheodore Ts'o STATX_ATTR_NODUMP); 11560d72b928SJeff Layton generic_fillattr(idmap, 
request_mask, inode, stat); 115789fdcd26SYang Shi 11582cf13384SDavid Stevens if (shmem_is_huge(inode, 0, false, NULL, 0)) 115989fdcd26SYang Shi stat->blksize = HPAGE_PMD_SIZE; 116089fdcd26SYang Shi 1161f7cd16a5SXavier Roche if (request_mask & STATX_BTIME) { 1162f7cd16a5SXavier Roche stat->result_mask |= STATX_BTIME; 1163f7cd16a5SXavier Roche stat->btime.tv_sec = info->i_crtime.tv_sec; 1164f7cd16a5SXavier Roche stat->btime.tv_nsec = info->i_crtime.tv_nsec; 1165f7cd16a5SXavier Roche } 1166f7cd16a5SXavier Roche 116744a30220SYu Zhao return 0; 116844a30220SYu Zhao } 116944a30220SYu Zhao 1170c1632a0fSChristian Brauner static int shmem_setattr(struct mnt_idmap *idmap, 1171549c7297SChristian Brauner struct dentry *dentry, struct iattr *attr) 11721da177e4SLinus Torvalds { 117375c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 117440e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 11751da177e4SLinus Torvalds int error; 117636f05cabSJeff Layton bool update_mtime = false; 117736f05cabSJeff Layton bool update_ctime = true; 11781da177e4SLinus Torvalds 11797a80e5b8SGiuseppe Scrivano error = setattr_prepare(idmap, dentry, attr); 1180db78b877SChristoph Hellwig if (error) 1181db78b877SChristoph Hellwig return error; 1182db78b877SChristoph Hellwig 11836fd73538SDaniel Verkamp if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { 11846fd73538SDaniel Verkamp if ((inode->i_mode ^ attr->ia_mode) & 0111) { 11856fd73538SDaniel Verkamp return -EPERM; 11866fd73538SDaniel Verkamp } 11876fd73538SDaniel Verkamp } 11886fd73538SDaniel Verkamp 118994c1e62dSHugh Dickins if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 119094c1e62dSHugh Dickins loff_t oldsize = inode->i_size; 119194c1e62dSHugh Dickins loff_t newsize = attr->ia_size; 11923889e6e7Snpiggin@suse.de 11939608703eSJan Kara /* protected by i_rwsem */ 119440e041a2SDavid Herrmann if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 119540e041a2SDavid Herrmann (newsize > oldsize && (info->seals & F_SEAL_GROW))) 119640e041a2SDavid Herrmann return -EPERM; 119740e041a2SDavid Herrmann 119894c1e62dSHugh Dickins if (newsize != oldsize) { 119977142517SKonstantin Khlebnikov error = shmem_reacct_size(SHMEM_I(inode)->flags, 120077142517SKonstantin Khlebnikov oldsize, newsize); 120177142517SKonstantin Khlebnikov if (error) 120277142517SKonstantin Khlebnikov return error; 120394c1e62dSHugh Dickins i_size_write(inode, newsize); 120436f05cabSJeff Layton update_mtime = true; 120536f05cabSJeff Layton } else { 120636f05cabSJeff Layton update_ctime = false; 120794c1e62dSHugh Dickins } 1208afa2db2fSJosef Bacik if (newsize <= oldsize) { 120994c1e62dSHugh Dickins loff_t holebegin = round_up(newsize, PAGE_SIZE); 1210d0424c42SHugh Dickins if (oldsize > holebegin) 1211d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1212d0424c42SHugh Dickins holebegin, 0, 1); 1213d0424c42SHugh Dickins if (info->alloced) 1214d0424c42SHugh Dickins shmem_truncate_range(inode, 1215d0424c42SHugh Dickins newsize, (loff_t)-1); 121694c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */ 1217d0424c42SHugh Dickins if (oldsize > holebegin) 1218d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping, 1219d0424c42SHugh Dickins holebegin, 0, 1); 122094c1e62dSHugh Dickins } 12211da177e4SLinus Torvalds } 12221da177e4SLinus Torvalds 1223e09764cfSCarlos Maiolino if (is_quota_modification(idmap, inode, attr)) { 1224e09764cfSCarlos Maiolino error = dquot_initialize(inode); 1225e09764cfSCarlos Maiolino if (error) 
1226e09764cfSCarlos Maiolino return error; 1227e09764cfSCarlos Maiolino } 1228e09764cfSCarlos Maiolino 1229e09764cfSCarlos Maiolino /* Transfer quota accounting */ 1230e09764cfSCarlos Maiolino if (i_uid_needs_update(idmap, attr, inode) || 1231e09764cfSCarlos Maiolino i_gid_needs_update(idmap, attr, inode)) { 1232e09764cfSCarlos Maiolino error = dquot_transfer(idmap, inode, attr); 1233e09764cfSCarlos Maiolino 1234e09764cfSCarlos Maiolino if (error) 1235e09764cfSCarlos Maiolino return error; 1236e09764cfSCarlos Maiolino } 1237e09764cfSCarlos Maiolino 12387a80e5b8SGiuseppe Scrivano setattr_copy(idmap, inode, attr); 1239db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE) 12407a80e5b8SGiuseppe Scrivano error = posix_acl_chmod(idmap, dentry, inode->i_mode); 124136f05cabSJeff Layton if (!error && update_ctime) { 124265287334SJeff Layton inode_set_ctime_current(inode); 124336f05cabSJeff Layton if (update_mtime) 124465287334SJeff Layton inode->i_mtime = inode_get_ctime(inode); 124536f05cabSJeff Layton inode_inc_iversion(inode); 124636f05cabSJeff Layton } 12471da177e4SLinus Torvalds return error; 12481da177e4SLinus Torvalds } 12491da177e4SLinus Torvalds 12501f895f75SAl Viro static void shmem_evict_inode(struct inode *inode) 12511da177e4SLinus Torvalds { 12521da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 1253779750d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 12542daf18a7SHugh Dickins size_t freed = 0; 12551da177e4SLinus Torvalds 125630e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) { 12571da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size); 12581da177e4SLinus Torvalds inode->i_size = 0; 1259bc786390SHugh Dickins mapping_set_exiting(inode->i_mapping); 12603889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1); 1261779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1262779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1263779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) { 1264779750d2SKirill A. Shutemov list_del_init(&info->shrinklist); 1265779750d2SKirill A. Shutemov sbinfo->shrinklist_len--; 1266779750d2SKirill A. Shutemov } 1267779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1268779750d2SKirill A. Shutemov } 1269af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) { 1270af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... */ 1271af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction, 1272af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction)); 1273cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1274af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */ 1275af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction)) 12761da177e4SLinus Torvalds list_del_init(&info->swaplist); 1277cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 12781da177e4SLinus Torvalds } 12793ed47db3SAl Viro } 1280b09e0fa4SEric Paris 12812daf18a7SHugh Dickins simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? 
&freed : NULL); 12822daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, freed); 12830f3c42f5SHugh Dickins WARN_ON(inode->i_blocks); 1284dbd5768fSJan Kara clear_inode(inode); 1285e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA 1286e09764cfSCarlos Maiolino dquot_free_inode(inode); 1287e09764cfSCarlos Maiolino dquot_drop(inode); 1288e09764cfSCarlos Maiolino #endif 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds 1291b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping, 1292da08e9b7SMatthew Wilcox (Oracle) pgoff_t start, struct folio_batch *fbatch, 1293da08e9b7SMatthew Wilcox (Oracle) pgoff_t *indices, unsigned int type) 1294478922e2SMatthew Wilcox { 1295b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start); 1296da08e9b7SMatthew Wilcox (Oracle) struct folio *folio; 129787039546SHugh Dickins swp_entry_t entry; 1298478922e2SMatthew Wilcox 1299478922e2SMatthew Wilcox rcu_read_lock(); 1300da08e9b7SMatthew Wilcox (Oracle) xas_for_each(&xas, folio, ULONG_MAX) { 1301da08e9b7SMatthew Wilcox (Oracle) if (xas_retry(&xas, folio)) 13025b9c98f3SMike Kravetz continue; 1303b56a2d8aSVineeth Remanan Pillai 1304da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1305478922e2SMatthew Wilcox continue; 1306b56a2d8aSVineeth Remanan Pillai 1307da08e9b7SMatthew Wilcox (Oracle) entry = radix_to_swp_entry(folio); 13086cec2b95SMiaohe Lin /* 13096cec2b95SMiaohe Lin * swapin error entries can be found in the mapping. But they're 13106cec2b95SMiaohe Lin * deliberately ignored here as we've done everything we can do. 13116cec2b95SMiaohe Lin */ 131287039546SHugh Dickins if (swp_type(entry) != type) 1313b56a2d8aSVineeth Remanan Pillai continue; 1314b56a2d8aSVineeth Remanan Pillai 1315e384200eSHugh Dickins indices[folio_batch_count(fbatch)] = xas.xa_index; 1316da08e9b7SMatthew Wilcox (Oracle) if (!folio_batch_add(fbatch, folio)) 1317da08e9b7SMatthew Wilcox (Oracle) break; 1318b56a2d8aSVineeth Remanan Pillai 1319b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1320e21a2955SMatthew Wilcox xas_pause(&xas); 1321478922e2SMatthew Wilcox cond_resched_rcu(); 1322478922e2SMatthew Wilcox } 1323b56a2d8aSVineeth Remanan Pillai } 1324478922e2SMatthew Wilcox rcu_read_unlock(); 1325e21a2955SMatthew Wilcox 1326da08e9b7SMatthew Wilcox (Oracle) return xas.xa_index; 1327b56a2d8aSVineeth Remanan Pillai } 1328b56a2d8aSVineeth Remanan Pillai 1329b56a2d8aSVineeth Remanan Pillai /* 1330b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1331b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 
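 *
 * Illustrative outcomes of that contract: a batch of three entries that
 * all swap in returns 3; if the second swapin fails with -ENOMEM, the
 * loop stops and -ENOMEM is returned; any other error is skipped and
 * does not end the batch.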
1332b56a2d8aSVineeth Remanan Pillai */ 1333da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode, 1334da08e9b7SMatthew Wilcox (Oracle) struct folio_batch *fbatch, pgoff_t *indices) 1335b56a2d8aSVineeth Remanan Pillai { 1336b56a2d8aSVineeth Remanan Pillai int i = 0; 1337b56a2d8aSVineeth Remanan Pillai int ret = 0; 1338b56a2d8aSVineeth Remanan Pillai int error = 0; 1339b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1340b56a2d8aSVineeth Remanan Pillai 1341da08e9b7SMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(fbatch); i++) { 1342da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = fbatch->folios[i]; 1343b56a2d8aSVineeth Remanan Pillai 1344da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio)) 1345b56a2d8aSVineeth Remanan Pillai continue; 1346da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, indices[i], 1347da08e9b7SMatthew Wilcox (Oracle) &folio, SGP_CACHE, 1348b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1349b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1350b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1351da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1352da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1353b56a2d8aSVineeth Remanan Pillai ret++; 1354b56a2d8aSVineeth Remanan Pillai } 1355b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1356b56a2d8aSVineeth Remanan Pillai break; 1357b56a2d8aSVineeth Remanan Pillai error = 0; 1358b56a2d8aSVineeth Remanan Pillai } 1359b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1360478922e2SMatthew Wilcox } 1361478922e2SMatthew Wilcox 136246f65ec1SHugh Dickins /* 136346f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 136446f65ec1SHugh Dickins */ 136510a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type) 13661da177e4SLinus Torvalds { 1367b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1368b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0; 1369da08e9b7SMatthew Wilcox (Oracle) struct folio_batch fbatch; 1370b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE]; 1371b56a2d8aSVineeth Remanan Pillai int ret = 0; 13721da177e4SLinus Torvalds 1373b56a2d8aSVineeth Remanan Pillai do { 1374da08e9b7SMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 1375da08e9b7SMatthew Wilcox (Oracle) shmem_find_swap_entries(mapping, start, &fbatch, indices, type); 1376da08e9b7SMatthew Wilcox (Oracle) if (folio_batch_count(&fbatch) == 0) { 1377b56a2d8aSVineeth Remanan Pillai ret = 0; 1378778dd893SHugh Dickins break; 1379b56a2d8aSVineeth Remanan Pillai } 1380b56a2d8aSVineeth Remanan Pillai 1381da08e9b7SMatthew Wilcox (Oracle) ret = shmem_unuse_swap_entries(inode, &fbatch, indices); 1382b56a2d8aSVineeth Remanan Pillai if (ret < 0) 1383b56a2d8aSVineeth Remanan Pillai break; 1384b56a2d8aSVineeth Remanan Pillai 1385da08e9b7SMatthew Wilcox (Oracle) start = indices[folio_batch_count(&fbatch) - 1]; 1386b56a2d8aSVineeth Remanan Pillai } while (true); 1387b56a2d8aSVineeth Remanan Pillai 1388b56a2d8aSVineeth Remanan Pillai return ret; 1389b56a2d8aSVineeth Remanan Pillai } 1390b56a2d8aSVineeth Remanan Pillai 1391b56a2d8aSVineeth Remanan Pillai /* 1392b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap 1393b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be 1394b56a2d8aSVineeth Remanan Pillai * unused. 
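 *
 * Sketch of the expected call shape (swapoff via try_to_unuse() in
 * mm/swapfile.c is the usual caller; shown here only for orientation):
 *
 *	error = shmem_unuse(type);
 *	if (error)
 *		return error;	/* swapoff fails, device stays in use */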
1395b56a2d8aSVineeth Remanan Pillai */ 139610a9c496SChristoph Hellwig int shmem_unuse(unsigned int type) 1397b56a2d8aSVineeth Remanan Pillai { 1398b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1399b56a2d8aSVineeth Remanan Pillai int error = 0; 1400b56a2d8aSVineeth Remanan Pillai 1401b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1402b56a2d8aSVineeth Remanan Pillai return 0; 1403b56a2d8aSVineeth Remanan Pillai 1404b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1405b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1406b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1407b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1408b56a2d8aSVineeth Remanan Pillai continue; 1409b56a2d8aSVineeth Remanan Pillai } 1410af53d3e9SHugh Dickins /* 1411af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1412af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1413af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1414af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1415af53d3e9SHugh Dickins */ 1416af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1417b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1418b56a2d8aSVineeth Remanan Pillai 141910a9c496SChristoph Hellwig error = shmem_unuse_inode(&info->vfs_inode, type); 1420b56a2d8aSVineeth Remanan Pillai cond_resched(); 1421b56a2d8aSVineeth Remanan Pillai 1422b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1423b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1424b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1425b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1426af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1427af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1428b56a2d8aSVineeth Remanan Pillai if (error) 1429b56a2d8aSVineeth Remanan Pillai break; 14301da177e4SLinus Torvalds } 1431cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1432778dd893SHugh Dickins 1433778dd893SHugh Dickins return error; 14341da177e4SLinus Torvalds } 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds /* 14371da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 14381da177e4SLinus Torvalds */ 14391da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 14401da177e4SLinus Torvalds { 1441e2e3fdc7SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 14428ccee8c1SLuis Chamberlain struct address_space *mapping = folio->mapping; 14438ccee8c1SLuis Chamberlain struct inode *inode = mapping->host; 14448ccee8c1SLuis Chamberlain struct shmem_inode_info *info = SHMEM_I(inode); 14452c6efe9cSLuis Chamberlain struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 14466922c0c7SHugh Dickins swp_entry_t swap; 14476922c0c7SHugh Dickins pgoff_t index; 14481da177e4SLinus Torvalds 14491e6decf3SHugh Dickins /* 1450cf7992bfSLuis Chamberlain * Our capabilities prevent regular writeback or sync from ever calling 1451cf7992bfSLuis Chamberlain * shmem_writepage; but a stacking filesystem might use ->writepage of 1452cf7992bfSLuis Chamberlain * its underlying filesystem, in which case tmpfs should write out to 1453cf7992bfSLuis Chamberlain * swap only in response to memory pressure, and not for the writeback 1454cf7992bfSLuis Chamberlain * threads or sync. 
1455cf7992bfSLuis Chamberlain */ 1456cf7992bfSLuis Chamberlain if (WARN_ON_ONCE(!wbc->for_reclaim)) 1457cf7992bfSLuis Chamberlain goto redirty; 1458cf7992bfSLuis Chamberlain 14592c6efe9cSLuis Chamberlain if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) 14609a976f0cSLuis Chamberlain goto redirty; 14619a976f0cSLuis Chamberlain 14629a976f0cSLuis Chamberlain if (!total_swap_pages) 14639a976f0cSLuis Chamberlain goto redirty; 14649a976f0cSLuis Chamberlain 1465cf7992bfSLuis Chamberlain /* 14661e6decf3SHugh Dickins * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or 14671e6decf3SHugh Dickins * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, 14681e6decf3SHugh Dickins * and its shmem_writeback() needs them to be split when swapping. 14691e6decf3SHugh Dickins */ 1470f530ed0eSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 14711e6decf3SHugh Dickins /* Ensure the subpages are still dirty */ 1472f530ed0eSMatthew Wilcox (Oracle) folio_test_set_dirty(folio); 14731e6decf3SHugh Dickins if (split_huge_page(page) < 0) 14741e6decf3SHugh Dickins goto redirty; 1475f530ed0eSMatthew Wilcox (Oracle) folio = page_folio(page); 1476f530ed0eSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 14771e6decf3SHugh Dickins } 14781e6decf3SHugh Dickins 1479f530ed0eSMatthew Wilcox (Oracle) index = folio->index; 14801635f6a7SHugh Dickins 14811635f6a7SHugh Dickins /* 14821635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 14831635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 1484f530ed0eSMatthew Wilcox (Oracle) * fallocated folio arriving here is now to initialize it and write it. 14851aac1400SHugh Dickins * 1486f530ed0eSMatthew Wilcox (Oracle) * That's okay for a folio already fallocated earlier, but if we have 14871aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 1488f530ed0eSMatthew Wilcox (Oracle) * of this folio in case we have to undo it, and (b) it may not be a 14891aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 1490f530ed0eSMatthew Wilcox (Oracle) * reactivate the folio, and let shmem_fallocate() quit when too many. 
14911635f6a7SHugh Dickins */ 1492f530ed0eSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 14931aac1400SHugh Dickins if (inode->i_private) { 14941aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 14951aac1400SHugh Dickins spin_lock(&inode->i_lock); 14961aac1400SHugh Dickins shmem_falloc = inode->i_private; 14971aac1400SHugh Dickins if (shmem_falloc && 14988e205f77SHugh Dickins !shmem_falloc->waitq && 14991aac1400SHugh Dickins index >= shmem_falloc->start && 15001aac1400SHugh Dickins index < shmem_falloc->next) 15011aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 15021aac1400SHugh Dickins else 15031aac1400SHugh Dickins shmem_falloc = NULL; 15041aac1400SHugh Dickins spin_unlock(&inode->i_lock); 15051aac1400SHugh Dickins if (shmem_falloc) 15061aac1400SHugh Dickins goto redirty; 15071aac1400SHugh Dickins } 1508f530ed0eSMatthew Wilcox (Oracle) folio_zero_range(folio, 0, folio_size(folio)); 1509f530ed0eSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 1510f530ed0eSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 15111635f6a7SHugh Dickins } 15121635f6a7SHugh Dickins 1513e2e3fdc7SMatthew Wilcox (Oracle) swap = folio_alloc_swap(folio); 151448f170fbSHugh Dickins if (!swap.val) 151548f170fbSHugh Dickins goto redirty; 1516d9fe526aSHugh Dickins 1517b1dea800SHugh Dickins /* 1518b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 1519f530ed0eSMatthew Wilcox (Oracle) * if it's not already there. Do it now before the folio is 15206922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1521b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 15226922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 15236922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 
1524b1dea800SHugh Dickins */ 1525b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 152605bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1527b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1528b1dea800SHugh Dickins 1529a4c366f0SMatthew Wilcox (Oracle) if (add_to_swap_cache(folio, swap, 15303852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 15313852f676SJoonsoo Kim NULL) == 0) { 15323c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 1); 1533aaa46865SHugh Dickins swap_shmem_alloc(swap); 15344cd400fdSMatthew Wilcox (Oracle) shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); 15356922c0c7SHugh Dickins 15366922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1537f530ed0eSMatthew Wilcox (Oracle) BUG_ON(folio_mapped(folio)); 1538f530ed0eSMatthew Wilcox (Oracle) swap_writepage(&folio->page, wbc); 15391da177e4SLinus Torvalds return 0; 15401da177e4SLinus Torvalds } 15411da177e4SLinus Torvalds 15426922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 15434081f744SMatthew Wilcox (Oracle) put_swap_folio(folio, swap); 15441da177e4SLinus Torvalds redirty: 1545f530ed0eSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1546d9fe526aSHugh Dickins if (wbc->for_reclaim) 1547f530ed0eSMatthew Wilcox (Oracle) return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ 1548f530ed0eSMatthew Wilcox (Oracle) folio_unlock(folio); 1549d9fe526aSHugh Dickins return 0; 15501da177e4SLinus Torvalds } 15511da177e4SLinus Torvalds 155275edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 155371fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1554680d794bSakpm@linux-foundation.org { 1555680d794bSakpm@linux-foundation.org char buffer[64]; 1556680d794bSakpm@linux-foundation.org 155771fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1558095f1fc4SLee Schermerhorn return; /* show nothing */ 1559095f1fc4SLee Schermerhorn 1560a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1561095f1fc4SLee Schermerhorn 1562095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1563680d794bSakpm@linux-foundation.org } 156471fe804bSLee Schermerhorn 156571fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 156671fe804bSLee Schermerhorn { 156771fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 156871fe804bSLee Schermerhorn if (sbinfo->mpol) { 1569bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 157071fe804bSLee Schermerhorn mpol = sbinfo->mpol; 157171fe804bSLee Schermerhorn mpol_get(mpol); 1572bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 157371fe804bSLee Schermerhorn } 157471fe804bSLee Schermerhorn return mpol; 157571fe804bSLee Schermerhorn } 157675edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 157775edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 157875edd345SHugh Dickins { 157975edd345SHugh Dickins } 158075edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 158175edd345SHugh Dickins { 158275edd345SHugh Dickins return NULL; 158375edd345SHugh Dickins } 158475edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 158575edd345SHugh Dickins #ifndef CONFIG_NUMA 158675edd345SHugh Dickins #define vm_policy vm_private_data 158775edd345SHugh Dickins #endif 1588680d794bSakpm@linux-foundation.org 1589800d8c63SKirill A. 
Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1590800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1591800d8c63SKirill A. Shutemov { 1592800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 15932c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1594800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1595800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1596800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1597800d8c63SKirill A. Shutemov } 1598800d8c63SKirill A. Shutemov 1599800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1600800d8c63SKirill A. Shutemov { 1601800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1602800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1603800d8c63SKirill A. Shutemov } 1604800d8c63SKirill A. Shutemov 16055739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, 160641ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 16071da177e4SLinus Torvalds { 16081da177e4SLinus Torvalds struct vm_area_struct pvma; 160918a2f371SMel Gorman struct page *page; 16108c63ca5bSWill Deacon struct vm_fault vmf = { 16118c63ca5bSWill Deacon .vma = &pvma, 16128c63ca5bSWill Deacon }; 16131da177e4SLinus Torvalds 1614800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1615e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1616800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 161718a2f371SMel Gorman 16185739a81cSMatthew Wilcox (Oracle) if (!page) 16195739a81cSMatthew Wilcox (Oracle) return NULL; 16205739a81cSMatthew Wilcox (Oracle) return page_folio(page); 1621800d8c63SKirill A. Shutemov } 162218a2f371SMel Gorman 162378cc8cdcSRik van Riel /* 162478cc8cdcSRik van Riel * Make sure huge_gfp is always more limited than limit_gfp. 162578cc8cdcSRik van Riel * Some of the flags set permissions, while others set limitations. 162678cc8cdcSRik van Riel */ 162778cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 162878cc8cdcSRik van Riel { 162978cc8cdcSRik van Riel gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 163078cc8cdcSRik van Riel gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1631187df5ddSRik van Riel gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1632187df5ddSRik van Riel gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1633187df5ddSRik van Riel 1634187df5ddSRik van Riel /* Allow allocations only from the originally specified zones. */ 1635187df5ddSRik van Riel result |= zoneflags; 163678cc8cdcSRik van Riel 163778cc8cdcSRik van Riel /* 163878cc8cdcSRik van Riel * Minimize the result gfp by taking the union with the deny flags, 163978cc8cdcSRik van Riel * and the intersection of the allow flags. 164078cc8cdcSRik van Riel */ 164178cc8cdcSRik van Riel result |= (limit_gfp & denyflags); 164278cc8cdcSRik van Riel result |= (huge_gfp & limit_gfp) & allowflags; 164378cc8cdcSRik van Riel 164478cc8cdcSRik van Riel return result; 164578cc8cdcSRik van Riel } 164678cc8cdcSRik van Riel 164772827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp, 1648800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1649800d8c63SKirill A. Shutemov { 1650800d8c63SKirill A. 
Shutemov struct vm_area_struct pvma; 16517b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 16527b8d046fSMatthew Wilcox pgoff_t hindex; 1653dfe98499SMatthew Wilcox (Oracle) struct folio *folio; 1654800d8c63SKirill A. Shutemov 16554620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 16567b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 16577b8d046fSMatthew Wilcox XA_PRESENT)) 1658800d8c63SKirill A. Shutemov return NULL; 1659800d8c63SKirill A. Shutemov 1660800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1661dfe98499SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); 1662800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1663dfe98499SMatthew Wilcox (Oracle) if (!folio) 1664dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK); 166572827e5cSMatthew Wilcox (Oracle) return folio; 166618a2f371SMel Gorman } 166718a2f371SMel Gorman 16680c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp, 166918a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 167018a2f371SMel Gorman { 167118a2f371SMel Gorman struct vm_area_struct pvma; 16720c023ef5SMatthew Wilcox (Oracle) struct folio *folio; 167318a2f371SMel Gorman 1674800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 16750c023ef5SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); 1676800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 167718a2f371SMel Gorman 16780c023ef5SMatthew Wilcox (Oracle) return folio; 167918a2f371SMel Gorman } 168018a2f371SMel Gorman 1681b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, 1682800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1683800d8c63SKirill A. Shutemov { 16840f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 168572827e5cSMatthew Wilcox (Oracle) struct folio *folio; 1686800d8c63SKirill A. Shutemov int nr; 1687c7e263abSLukas Czerner int err; 1688800d8c63SKirill A. Shutemov 1689396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1690800d8c63SKirill A. Shutemov huge = false; 1691800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1692800d8c63SKirill A. Shutemov 1693c7e263abSLukas Czerner err = shmem_inode_acct_block(inode, nr); 1694c7e263abSLukas Czerner if (err) 1695800d8c63SKirill A. Shutemov goto failed; 1696800d8c63SKirill A. Shutemov 1697800d8c63SKirill A. Shutemov if (huge) 169872827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_hugefolio(gfp, info, index); 1699800d8c63SKirill A. Shutemov else 170072827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, index); 170172827e5cSMatthew Wilcox (Oracle) if (folio) { 170272827e5cSMatthew Wilcox (Oracle) __folio_set_locked(folio); 170372827e5cSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 1704b1d0ec3aSMatthew Wilcox (Oracle) return folio; 170575edd345SHugh Dickins } 170618a2f371SMel Gorman 1707800d8c63SKirill A. Shutemov err = -ENOMEM; 17080f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1709800d8c63SKirill A. Shutemov failed: 1710800d8c63SKirill A. 
Shutemov return ERR_PTR(err); 17111da177e4SLinus Torvalds } 171271fe804bSLee Schermerhorn 17131da177e4SLinus Torvalds /* 1714bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1715fc26babbSMatthew Wilcox (Oracle) * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of 1716bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1717bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1718bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1719bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1720bde05d1cSHugh Dickins * 1721bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1722bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1723bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 1724bde05d1cSHugh Dickins */ 1725069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) 1726bde05d1cSHugh Dickins { 1727069d849cSMatthew Wilcox (Oracle) return folio_zonenum(folio) > gfp_zone(gfp); 1728bde05d1cSHugh Dickins } 1729bde05d1cSHugh Dickins 17300d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, 1731bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1732bde05d1cSHugh Dickins { 1733d21bba2bSMatthew Wilcox (Oracle) struct folio *old, *new; 1734bde05d1cSHugh Dickins struct address_space *swap_mapping; 1735c1cb20d4SYu Zhao swp_entry_t entry; 1736bde05d1cSHugh Dickins pgoff_t swap_index; 1737bde05d1cSHugh Dickins int error; 1738bde05d1cSHugh Dickins 17390d698e25SMatthew Wilcox (Oracle) old = *foliop; 17403d2c9087SDavid Hildenbrand entry = old->swap; 1741c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1742907ea17eSMatthew Wilcox (Oracle) swap_mapping = swap_address_space(entry); 1743bde05d1cSHugh Dickins 1744bde05d1cSHugh Dickins /* 1745bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1746bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1747bde05d1cSHugh Dickins */ 1748bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1749907ea17eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(old), old); 1750907ea17eSMatthew Wilcox (Oracle) new = shmem_alloc_folio(gfp, info, index); 1751907ea17eSMatthew Wilcox (Oracle) if (!new) 1752bde05d1cSHugh Dickins return -ENOMEM; 1753bde05d1cSHugh Dickins 1754907ea17eSMatthew Wilcox (Oracle) folio_get(new); 1755907ea17eSMatthew Wilcox (Oracle) folio_copy(new, old); 1756907ea17eSMatthew Wilcox (Oracle) flush_dcache_folio(new); 1757bde05d1cSHugh Dickins 1758907ea17eSMatthew Wilcox (Oracle) __folio_set_locked(new); 1759907ea17eSMatthew Wilcox (Oracle) __folio_set_swapbacked(new); 1760907ea17eSMatthew Wilcox (Oracle) folio_mark_uptodate(new); 17613d2c9087SDavid Hildenbrand new->swap = entry; 1762907ea17eSMatthew Wilcox (Oracle) folio_set_swapcache(new); 1763bde05d1cSHugh Dickins 1764bde05d1cSHugh Dickins /* 1765bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1766bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 
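 *
 * Caller-side shape (cf. shmem_swapin_folio() below):
 *
 *	if (shmem_should_replace_folio(folio, gfp)) {
 *		error = shmem_replace_folio(&folio, gfp, info, index);
 *		if (error)
 *			goto failed;	/* *foliop is left untouched */
 *	}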
1767bde05d1cSHugh Dickins */ 1768b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 1769907ea17eSMatthew Wilcox (Oracle) error = shmem_replace_entry(swap_mapping, swap_index, old, new); 17700142ef6cSHugh Dickins if (!error) { 1771d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(old, new); 1772907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1773907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1774907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); 1775907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_SHMEM, -1); 17760142ef6cSHugh Dickins } 1777b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1778bde05d1cSHugh Dickins 17790142ef6cSHugh Dickins if (unlikely(error)) { 17800142ef6cSHugh Dickins /* 17810142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 17820142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 17830142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 17840142ef6cSHugh Dickins */ 1785907ea17eSMatthew Wilcox (Oracle) old = new; 17860142ef6cSHugh Dickins } else { 1787907ea17eSMatthew Wilcox (Oracle) folio_add_lru(new); 17880d698e25SMatthew Wilcox (Oracle) *foliop = new; 17890142ef6cSHugh Dickins } 1790bde05d1cSHugh Dickins 1791907ea17eSMatthew Wilcox (Oracle) folio_clear_swapcache(old); 1792907ea17eSMatthew Wilcox (Oracle) old->private = NULL; 1793bde05d1cSHugh Dickins 1794907ea17eSMatthew Wilcox (Oracle) folio_unlock(old); 1795907ea17eSMatthew Wilcox (Oracle) folio_put_refs(old, 2); 17960142ef6cSHugh Dickins return error; 1797bde05d1cSHugh Dickins } 1798bde05d1cSHugh Dickins 17996cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, 18006cec2b95SMiaohe Lin struct folio *folio, swp_entry_t swap) 18016cec2b95SMiaohe Lin { 18026cec2b95SMiaohe Lin struct address_space *mapping = inode->i_mapping; 18036cec2b95SMiaohe Lin swp_entry_t swapin_error; 18046cec2b95SMiaohe Lin void *old; 18056cec2b95SMiaohe Lin 1806af19487fSAxel Rasmussen swapin_error = make_poisoned_swp_entry(); 18076cec2b95SMiaohe Lin old = xa_cmpxchg_irq(&mapping->i_pages, index, 18086cec2b95SMiaohe Lin swp_to_radix_entry(swap), 18096cec2b95SMiaohe Lin swp_to_radix_entry(swapin_error), 0); 18106cec2b95SMiaohe Lin if (old != swp_to_radix_entry(swap)) 18116cec2b95SMiaohe Lin return; 18126cec2b95SMiaohe Lin 18136cec2b95SMiaohe Lin folio_wait_writeback(folio); 181475fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 18156cec2b95SMiaohe Lin /* 18163c1b7528SHugh Dickins * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks 18173c1b7528SHugh Dickins * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) 18183c1b7528SHugh Dickins * in shmem_evict_inode(). 18196cec2b95SMiaohe Lin */ 18203c1b7528SHugh Dickins shmem_recalc_inode(inode, -1, -1); 18216cec2b95SMiaohe Lin swap_free(swap); 18226cec2b95SMiaohe Lin } 18236cec2b95SMiaohe Lin 1824bde05d1cSHugh Dickins /* 1825833de10fSMiaohe Lin * Swap in the folio pointed to by *foliop. 1826833de10fSMiaohe Lin * Caller has to make sure that *foliop contains a valid swapped folio. 1827833de10fSMiaohe Lin * Returns 0 and the folio in foliop if success. On failure, returns the 1828833de10fSMiaohe Lin * error code and NULL in *foliop. 
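 *
 * Illustrative call, as made from shmem_get_folio_gfp() below:
 *
 *	if (xa_is_value(folio)) {		/* shadow swap entry */
 *		error = shmem_swapin_folio(inode, index, &folio, sgp,
 *					   gfp, vma, fault_type);
 *		if (error == -EEXIST)		/* raced with another swapin */
 *			goto repeat;
 *	}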
18291da177e4SLinus Torvalds */ 1830da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 1831da08e9b7SMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, 1832c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 18332b740303SSouptick Joarder vm_fault_t *fault_type) 18341da177e4SLinus Torvalds { 18351da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 183623f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 183704f94e3fSDan Schatzberg struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; 1838cbc2bd98SKairui Song struct swap_info_struct *si; 1839da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = NULL; 18401da177e4SLinus Torvalds swp_entry_t swap; 18411da177e4SLinus Torvalds int error; 18421da177e4SLinus Torvalds 1843da08e9b7SMatthew Wilcox (Oracle) VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 1844da08e9b7SMatthew Wilcox (Oracle) swap = radix_to_swp_entry(*foliop); 1845da08e9b7SMatthew Wilcox (Oracle) *foliop = NULL; 184654af6042SHugh Dickins 1847af19487fSAxel Rasmussen if (is_poisoned_swp_entry(swap)) 18486cec2b95SMiaohe Lin return -EIO; 18496cec2b95SMiaohe Lin 1850cbc2bd98SKairui Song si = get_swap_device(swap); 1851cbc2bd98SKairui Song if (!si) { 1852cbc2bd98SKairui Song if (!shmem_confirm_swap(mapping, index, swap)) 1853cbc2bd98SKairui Song return -EEXIST; 1854cbc2bd98SKairui Song else 1855cbc2bd98SKairui Song return -EINVAL; 1856cbc2bd98SKairui Song } 1857cbc2bd98SKairui Song 18581da177e4SLinus Torvalds /* Look it up and read it in.. */ 18595739a81cSMatthew Wilcox (Oracle) folio = swap_cache_get_folio(swap, NULL, 0); 18605739a81cSMatthew Wilcox (Oracle) if (!folio) { 18619e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 18629e18eb29SAndres Lagar-Cavilla if (fault_type) { 186368da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 18649e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 18652262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 18669e18eb29SAndres Lagar-Cavilla } 18679e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 18685739a81cSMatthew Wilcox (Oracle) folio = shmem_swapin(swap, gfp, info, index); 18695739a81cSMatthew Wilcox (Oracle) if (!folio) { 18701da177e4SLinus Torvalds error = -ENOMEM; 187154af6042SHugh Dickins goto failed; 1872285b2c4fSHugh Dickins } 18731da177e4SLinus Torvalds } 18741da177e4SLinus Torvalds 1875833de10fSMiaohe Lin /* We have to do this with folio locked to prevent races */ 1876da08e9b7SMatthew Wilcox (Oracle) folio_lock(folio); 1877da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_swapcache(folio) || 18783d2c9087SDavid Hildenbrand folio->swap.val != swap.val || 1879d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1880c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1881d1899228SHugh Dickins goto unlock; 1882bde05d1cSHugh Dickins } 1883da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 18841da177e4SLinus Torvalds error = -EIO; 188554af6042SHugh Dickins goto failed; 188654af6042SHugh Dickins } 1887da08e9b7SMatthew Wilcox (Oracle) folio_wait_writeback(folio); 188854af6042SHugh Dickins 18898a84802eSSteven Price /* 18908a84802eSSteven Price * Some architectures may have to restore extra metadata to the 1891da08e9b7SMatthew Wilcox (Oracle) * folio after reading from swap. 
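 * (For example, arm64 restores MTE tags at this point; on most other
 * architectures arch_swap_restore() is an empty stub.)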
18928a84802eSSteven Price */ 1893da08e9b7SMatthew Wilcox (Oracle) arch_swap_restore(swap, folio); 18948a84802eSSteven Price 1895069d849cSMatthew Wilcox (Oracle) if (shmem_should_replace_folio(folio, gfp)) { 18960d698e25SMatthew Wilcox (Oracle) error = shmem_replace_folio(&folio, gfp, info, index); 1897bde05d1cSHugh Dickins if (error) 189854af6042SHugh Dickins goto failed; 18991da177e4SLinus Torvalds } 19001da177e4SLinus Torvalds 1901b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, index, 19023fea5a49SJohannes Weiner swp_to_radix_entry(swap), gfp, 19033fea5a49SJohannes Weiner charge_mm); 190454af6042SHugh Dickins if (error) 190554af6042SHugh Dickins goto failed; 190654af6042SHugh Dickins 19073c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, -1); 190827ab7006SHugh Dickins 190966d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1910da08e9b7SMatthew Wilcox (Oracle) folio_mark_accessed(folio); 191166d2f4d2SHugh Dickins 191275fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio); 1913da08e9b7SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 191427ab7006SHugh Dickins swap_free(swap); 1915cbc2bd98SKairui Song put_swap_device(si); 191627ab7006SHugh Dickins 1917da08e9b7SMatthew Wilcox (Oracle) *foliop = folio; 1918c5bf121eSVineeth Remanan Pillai return 0; 1919c5bf121eSVineeth Remanan Pillai failed: 1920c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap)) 1921c5bf121eSVineeth Remanan Pillai error = -EEXIST; 19226cec2b95SMiaohe Lin if (error == -EIO) 19236cec2b95SMiaohe Lin shmem_set_folio_swapin_error(inode, index, folio, swap); 1924c5bf121eSVineeth Remanan Pillai unlock: 1925da08e9b7SMatthew Wilcox (Oracle) if (folio) { 1926da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio); 1927da08e9b7SMatthew Wilcox (Oracle) folio_put(folio); 1928c5bf121eSVineeth Remanan Pillai } 1929cbc2bd98SKairui Song put_swap_device(si); 1930c5bf121eSVineeth Remanan Pillai 1931c5bf121eSVineeth Remanan Pillai return error; 1932c5bf121eSVineeth Remanan Pillai } 1933c5bf121eSVineeth Remanan Pillai 1934c5bf121eSVineeth Remanan Pillai /* 1935fc26babbSMatthew Wilcox (Oracle) * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate 1936c5bf121eSVineeth Remanan Pillai * 1937c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the 1938c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap 1939c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache. 1940c5bf121eSVineeth Remanan Pillai * 1941c949b097SAxel Rasmussen * vma, vmf, and fault_type are only supplied by shmem_fault: 1942c5bf121eSVineeth Remanan Pillai * otherwise they are NULL. 
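 *
 * How the sgp_type values steer the code below (summary, not a spec):
 *
 *	SGP_READ	hole: return 0 with *foliop == NULL
 *	SGP_NOALLOC	hole: return -ENOENT
 *	SGP_CACHE	hole: allocate a folio (the common fault path)
 *	SGP_WRITE	as SGP_CACHE, but skip clearing: caller will write
 *	SGP_FALLOC	treated as SGP_WRITE once a new folio is allocated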
1943c5bf121eSVineeth Remanan Pillai */ 1944fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, 1945fc26babbSMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 1946c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1947c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1948c5bf121eSVineeth Remanan Pillai { 1949c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1950c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1951c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1952c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1953b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio; 19546fe7d712SLukas Bulwahn pgoff_t hindex; 1955164cc4feSRik van Riel gfp_t huge_gfp; 1956c5bf121eSVineeth Remanan Pillai int error; 1957c5bf121eSVineeth Remanan Pillai int once = 0; 1958c5bf121eSVineeth Remanan Pillai int alloced = 0; 1959c5bf121eSVineeth Remanan Pillai 1960c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1961c5bf121eSVineeth Remanan Pillai return -EFBIG; 1962c5bf121eSVineeth Remanan Pillai repeat: 1963c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1964c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1965c5bf121eSVineeth Remanan Pillai return -EINVAL; 1966c5bf121eSVineeth Remanan Pillai } 1967c5bf121eSVineeth Remanan Pillai 1968c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 196904f94e3fSDan Schatzberg charge_mm = vma ? vma->vm_mm : NULL; 1970c5bf121eSVineeth Remanan Pillai 1971aaeb94ebSChristoph Hellwig folio = filemap_get_entry(mapping, index); 1972b1d0ec3aSMatthew Wilcox (Oracle) if (folio && vma && userfaultfd_minor(vma)) { 1973aaeb94ebSChristoph Hellwig if (!xa_is_value(folio)) 1974b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 1975c949b097SAxel Rasmussen *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 1976c949b097SAxel Rasmussen return 0; 1977c949b097SAxel Rasmussen } 1978c949b097SAxel Rasmussen 1979b1d0ec3aSMatthew Wilcox (Oracle) if (xa_is_value(folio)) { 1980da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, index, &folio, 1981c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1982c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1983c5bf121eSVineeth Remanan Pillai goto repeat; 1984c5bf121eSVineeth Remanan Pillai 1985fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 1986c5bf121eSVineeth Remanan Pillai return error; 1987c5bf121eSVineeth Remanan Pillai } 1988c5bf121eSVineeth Remanan Pillai 1989b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 1990aaeb94ebSChristoph Hellwig folio_lock(folio); 1991aaeb94ebSChristoph Hellwig 1992aaeb94ebSChristoph Hellwig /* Has the folio been truncated or swapped out? 
*/ 1993aaeb94ebSChristoph Hellwig if (unlikely(folio->mapping != mapping)) { 1994aaeb94ebSChristoph Hellwig folio_unlock(folio); 1995aaeb94ebSChristoph Hellwig folio_put(folio); 1996aaeb94ebSChristoph Hellwig goto repeat; 1997aaeb94ebSChristoph Hellwig } 1998acdd9f8eSHugh Dickins if (sgp == SGP_WRITE) 1999b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 2000b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 2001acdd9f8eSHugh Dickins goto out; 2002fc26babbSMatthew Wilcox (Oracle) /* fallocated folio */ 2003c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 2004c5bf121eSVineeth Remanan Pillai goto clear; 2005b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2006b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 2007c5bf121eSVineeth Remanan Pillai } 2008c5bf121eSVineeth Remanan Pillai 2009c5bf121eSVineeth Remanan Pillai /* 2010fc26babbSMatthew Wilcox (Oracle) * SGP_READ: succeed on hole, with NULL folio, letting caller zero. 2011fc26babbSMatthew Wilcox (Oracle) * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 2012acdd9f8eSHugh Dickins */ 2013fc26babbSMatthew Wilcox (Oracle) *foliop = NULL; 2014acdd9f8eSHugh Dickins if (sgp == SGP_READ) 2015acdd9f8eSHugh Dickins return 0; 2016acdd9f8eSHugh Dickins if (sgp == SGP_NOALLOC) 2017acdd9f8eSHugh Dickins return -ENOENT; 2018acdd9f8eSHugh Dickins 2019acdd9f8eSHugh Dickins /* 2020acdd9f8eSHugh Dickins * Fast cache lookup and swap lookup did not find it: allocate. 2021c5bf121eSVineeth Remanan Pillai */ 2022c5bf121eSVineeth Remanan Pillai 2023cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 2024cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2025cfda0526SMike Rapoport return 0; 2026cfda0526SMike Rapoport } 2027cfda0526SMike Rapoport 20282cf13384SDavid Stevens if (!shmem_is_huge(inode, index, false, 20292cf13384SDavid Stevens vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) 2030800d8c63SKirill A. Shutemov goto alloc_nohuge; 203127d80fa2SKees Cook 2032164cc4feSRik van Riel huge_gfp = vma_thp_gfp_mask(vma); 203378cc8cdcSRik van Riel huge_gfp = limit_gfp_mask(huge_gfp, gfp); 2034b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); 2035b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 2036c5bf121eSVineeth Remanan Pillai alloc_nohuge: 2037b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); 203854af6042SHugh Dickins } 2039b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) { 2040779750d2SKirill A. Shutemov int retry = 5; 2041c5bf121eSVineeth Remanan Pillai 2042b1d0ec3aSMatthew Wilcox (Oracle) error = PTR_ERR(folio); 2043b1d0ec3aSMatthew Wilcox (Oracle) folio = NULL; 2044779750d2SKirill A. Shutemov if (error != -ENOSPC) 2045c5bf121eSVineeth Remanan Pillai goto unlock; 2046779750d2SKirill A. Shutemov /* 2047fc26babbSMatthew Wilcox (Oracle) * Try to reclaim some space by splitting a large folio 2048779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 2049779750d2SKirill A. Shutemov */ 2050779750d2SKirill A. Shutemov while (retry--) { 2051779750d2SKirill A. Shutemov int ret; 2052c5bf121eSVineeth Remanan Pillai 2053779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 2054779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 2055779750d2SKirill A. Shutemov break; 2056779750d2SKirill A. Shutemov if (ret) 2057779750d2SKirill A. Shutemov goto alloc_nohuge; 2058779750d2SKirill A. 
Shutemov } 2059c5bf121eSVineeth Remanan Pillai goto unlock; 2060800d8c63SKirill A. Shutemov } 2061800d8c63SKirill A. Shutemov 2062b1d0ec3aSMatthew Wilcox (Oracle) hindex = round_down(index, folio_nr_pages(folio)); 2063800d8c63SKirill A. Shutemov 206466d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 2065b1d0ec3aSMatthew Wilcox (Oracle) __folio_set_referenced(folio); 206666d2f4d2SHugh Dickins 2067b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, hindex, 20683fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK, 20693fea5a49SJohannes Weiner charge_mm); 20703fea5a49SJohannes Weiner if (error) 2071800d8c63SKirill A. Shutemov goto unacct; 207254af6042SHugh Dickins 20733c1b7528SHugh Dickins folio_add_lru(folio); 20743c1b7528SHugh Dickins shmem_recalc_inode(inode, folio_nr_pages(folio), 0); 20751635f6a7SHugh Dickins alloced = true; 207654af6042SHugh Dickins 2077b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_pmd_mappable(folio) && 2078779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 2079fc26babbSMatthew Wilcox (Oracle) folio_next_index(folio) - 1) { 2080779750d2SKirill A. Shutemov /* 2081fc26babbSMatthew Wilcox (Oracle) * Part of the large folio is beyond i_size: subject 2082779750d2SKirill A. Shutemov * to shrink under memory pressure. 2083779750d2SKirill A. Shutemov */ 2084779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 2085d041353dSCong Wang /* 2086d041353dSCong Wang * list_empty_careful() to defend against unlocked access to 2087d041353dSCong Wang * ->shrinklist in shmem_unused_huge_shrink() 2088d041353dSCong Wang */ 2089d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 2090779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 2091779750d2SKirill A. Shutemov &sbinfo->shrinklist); 2092779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 2093779750d2SKirill A. Shutemov } 2094779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 2095779750d2SKirill A. Shutemov } 2096779750d2SKirill A. Shutemov 2097ec9516fbSHugh Dickins /* 2098fc26babbSMatthew Wilcox (Oracle) * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 20991635f6a7SHugh Dickins */ 21001635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 21011635f6a7SHugh Dickins sgp = SGP_WRITE; 21021635f6a7SHugh Dickins clear: 21031635f6a7SHugh Dickins /* 2104fc26babbSMatthew Wilcox (Oracle) * Let SGP_WRITE caller clear ends if write does not fill folio; 2105fc26babbSMatthew Wilcox (Oracle) * but SGP_FALLOC on a folio fallocated earlier must initialize 21061635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 2107ec9516fbSHugh Dickins */ 2108b1d0ec3aSMatthew Wilcox (Oracle) if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { 2109b1d0ec3aSMatthew Wilcox (Oracle) long i, n = folio_nr_pages(folio); 2110800d8c63SKirill A. Shutemov 2111b1d0ec3aSMatthew Wilcox (Oracle) for (i = 0; i < n; i++) 2112b1d0ec3aSMatthew Wilcox (Oracle) clear_highpage(folio_page(folio, i)); 2113b1d0ec3aSMatthew Wilcox (Oracle) flush_dcache_folio(folio); 2114b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 2115ec9516fbSHugh Dickins } 2116bde05d1cSHugh Dickins 211754af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 211875edd345SHugh Dickins if (sgp <= SGP_CACHE && 211909cbfeafSKirill A.
Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2120267a4c76SHugh Dickins if (alloced) { 2121b1d0ec3aSMatthew Wilcox (Oracle) folio_clear_dirty(folio); 2122b1d0ec3aSMatthew Wilcox (Oracle) filemap_remove_folio(folio); 21233c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0); 2124267a4c76SHugh Dickins } 212554af6042SHugh Dickins error = -EINVAL; 2126267a4c76SHugh Dickins goto unlock; 2127ff36b801SShaohua Li } 212863ec1973SMatthew Wilcox (Oracle) out: 2129fc26babbSMatthew Wilcox (Oracle) *foliop = folio; 213054af6042SHugh Dickins return 0; 2131d00806b1SNick Piggin 2132d0217ac0SNick Piggin /* 213354af6042SHugh Dickins * Error recovery. 21341da177e4SLinus Torvalds */ 213554af6042SHugh Dickins unacct: 2136b1d0ec3aSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); 2137800d8c63SKirill A. Shutemov 2138b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_large(folio)) { 2139b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2140b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 2141800d8c63SKirill A. Shutemov goto alloc_nohuge; 2142800d8c63SKirill A. Shutemov } 2143d1899228SHugh Dickins unlock: 2144b1d0ec3aSMatthew Wilcox (Oracle) if (folio) { 2145b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio); 2146b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio); 214754af6042SHugh Dickins } 214854af6042SHugh Dickins if (error == -ENOSPC && !once++) { 21493c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0); 21501da177e4SLinus Torvalds goto repeat; 2151d8dc74f2SAdrian Bunk } 21527f4446eeSMatthew Wilcox if (error == -EEXIST) 215354af6042SHugh Dickins goto repeat; 215454af6042SHugh Dickins return error; 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21574e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, 21584e1fc793SMatthew Wilcox (Oracle) enum sgp_type sgp) 21594e1fc793SMatthew Wilcox (Oracle) { 21604e1fc793SMatthew Wilcox (Oracle) return shmem_get_folio_gfp(inode, index, foliop, sgp, 21614e1fc793SMatthew Wilcox (Oracle) mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); 21624e1fc793SMatthew Wilcox (Oracle) } 21634e1fc793SMatthew Wilcox (Oracle) 216410d20bd2SLinus Torvalds /* 216510d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 216610d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 216710d20bd2SLinus Torvalds * target. 
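 *
 * Needed because the wait queue head it is paired with (shmem_fault()'s
 * shmem_falloc_waitq) lives on the hole-punching task's stack: a woken
 * waiter must already be off that list by the time wake_up_all() returns.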
216810d20bd2SLinus Torvalds */ 2169ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 217010d20bd2SLinus Torvalds { 217110d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 21722055da97SIngo Molnar list_del_init(&wait->entry); 217310d20bd2SLinus Torvalds return ret; 217410d20bd2SLinus Torvalds } 217510d20bd2SLinus Torvalds 217620acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 21771da177e4SLinus Torvalds { 217811bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 2179496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 21809e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 218168a54100SMatthew Wilcox (Oracle) struct folio *folio = NULL; 218220acce67SSouptick Joarder int err; 218320acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 21841da177e4SLinus Torvalds 2185f00cdc6dSHugh Dickins /* 2186f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2187f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 21889608703eSJan Kara * locks writers out with its hold on i_rwsem. So refrain from 21898e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 21908e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 21918e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 21928e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 21938e205f77SHugh Dickins * 21948e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 21958e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 21968e205f77SHugh Dickins * we just need to make racing faults a rare case. 21978e205f77SHugh Dickins * 21988e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 21999608703eSJan Kara * standard mutex or completion: but we cannot take i_rwsem in fault, 22008e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 2201f00cdc6dSHugh Dickins */ 2202f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2203f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2204f00cdc6dSHugh Dickins 2205f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2206f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 22078e205f77SHugh Dickins if (shmem_falloc && 22088e205f77SHugh Dickins shmem_falloc->waitq && 22098e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 22108e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 22118897c1b1SKirill A. Shutemov struct file *fpin; 22128e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 221310d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 22148e205f77SHugh Dickins 22158e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 22168897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL); 22178897c1b1SKirill A. 
Shutemov if (fpin) 22188e205f77SHugh Dickins ret = VM_FAULT_RETRY; 22198e205f77SHugh Dickins 22208e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 22218e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 22228e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 22238e205f77SHugh Dickins spin_unlock(&inode->i_lock); 22248e205f77SHugh Dickins schedule(); 22258e205f77SHugh Dickins 22268e205f77SHugh Dickins /* 22278e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 22288e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 22298e205f77SHugh Dickins * is usually invalid by the time we reach here, but 22308e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 22318e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 22328e205f77SHugh Dickins */ 22338e205f77SHugh Dickins spin_lock(&inode->i_lock); 22348e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 22358e205f77SHugh Dickins spin_unlock(&inode->i_lock); 22368897c1b1SKirill A. Shutemov 22378897c1b1SKirill A. Shutemov if (fpin) 22388897c1b1SKirill A. Shutemov fput(fpin); 22398e205f77SHugh Dickins return ret; 2240f00cdc6dSHugh Dickins } 22418e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2242f00cdc6dSHugh Dickins } 2243f00cdc6dSHugh Dickins 224468a54100SMatthew Wilcox (Oracle) err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, 2245cfda0526SMike Rapoport gfp, vma, vmf, &ret); 224620acce67SSouptick Joarder if (err) 224720acce67SSouptick Joarder return vmf_error(err); 224868a54100SMatthew Wilcox (Oracle) if (folio) 224968a54100SMatthew Wilcox (Oracle) vmf->page = folio_file_page(folio, vmf->pgoff); 225068da9f05SHugh Dickins return ret; 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds 2253c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2254c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2255c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2256c01d5b30SHugh Dickins { 2257c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2258c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2259c01d5b30SHugh Dickins unsigned long addr; 2260c01d5b30SHugh Dickins unsigned long offset; 2261c01d5b30SHugh Dickins unsigned long inflated_len; 2262c01d5b30SHugh Dickins unsigned long inflated_addr; 2263c01d5b30SHugh Dickins unsigned long inflated_offset; 2264c01d5b30SHugh Dickins 2265c01d5b30SHugh Dickins if (len > TASK_SIZE) 2266c01d5b30SHugh Dickins return -ENOMEM; 2267c01d5b30SHugh Dickins 2268c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2269c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2270c01d5b30SHugh Dickins 2271396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2272c01d5b30SHugh Dickins return addr; 2273c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2274c01d5b30SHugh Dickins return addr; 2275c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2276c01d5b30SHugh Dickins return addr; 2277c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2278c01d5b30SHugh Dickins return addr; 2279c01d5b30SHugh Dickins 2280c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2281c01d5b30SHugh Dickins return addr; 2282c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2283c01d5b30SHugh Dickins return addr; 2284c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2285c01d5b30SHugh Dickins return addr; 2286c01d5b30SHugh Dickins /* 2287c01d5b30SHugh Dickins * Our priority 
is to support MAP_SHARED mapped hugely; 2288c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 228999158997SKirill A. Shutemov * But if caller specified an address hint and we allocated area there 229099158997SKirill A. Shutemov * successfully, respect that as before. 2291c01d5b30SHugh Dickins */ 229299158997SKirill A. Shutemov if (uaddr == addr) 2293c01d5b30SHugh Dickins return addr; 2294c01d5b30SHugh Dickins 2295c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2296c01d5b30SHugh Dickins struct super_block *sb; 2297c01d5b30SHugh Dickins 2298c01d5b30SHugh Dickins if (file) { 2299c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2300c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2301c01d5b30SHugh Dickins } else { 2302c01d5b30SHugh Dickins /* 2303c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2304c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 2305c01d5b30SHugh Dickins */ 2306c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2307c01d5b30SHugh Dickins return addr; 2308c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2309c01d5b30SHugh Dickins } 23103089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2311c01d5b30SHugh Dickins return addr; 2312c01d5b30SHugh Dickins } 2313c01d5b30SHugh Dickins 2314c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2315c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2316c01d5b30SHugh Dickins return addr; 2317c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2318c01d5b30SHugh Dickins return addr; 2319c01d5b30SHugh Dickins 2320c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2321c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2322c01d5b30SHugh Dickins return addr; 2323c01d5b30SHugh Dickins if (inflated_len < len) 2324c01d5b30SHugh Dickins return addr; 2325c01d5b30SHugh Dickins 232699158997SKirill A. 
Shutemov inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2327c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2328c01d5b30SHugh Dickins return addr; 2329c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2330c01d5b30SHugh Dickins return addr; 2331c01d5b30SHugh Dickins 2332c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2333c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2334c01d5b30SHugh Dickins if (inflated_offset > offset) 2335c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2336c01d5b30SHugh Dickins 2337c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2338c01d5b30SHugh Dickins return addr; 2339c01d5b30SHugh Dickins return inflated_addr; 2340c01d5b30SHugh Dickins } 2341c01d5b30SHugh Dickins 23421da177e4SLinus Torvalds #ifdef CONFIG_NUMA 234341ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 23441da177e4SLinus Torvalds { 2345496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 234641ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 23471da177e4SLinus Torvalds } 23481da177e4SLinus Torvalds 2349d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2350d8dc74f2SAdrian Bunk unsigned long addr) 23511da177e4SLinus Torvalds { 2352496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 235341ffe5d5SHugh Dickins pgoff_t index; 23541da177e4SLinus Torvalds 235541ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 235641ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 23571da177e4SLinus Torvalds } 23581da177e4SLinus Torvalds #endif 23591da177e4SLinus Torvalds 2360d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 23611da177e4SLinus Torvalds { 2362496ad9aaSAl Viro struct inode *inode = file_inode(file); 23631da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 23641da177e4SLinus Torvalds int retval = -ENOMEM; 23651da177e4SLinus Torvalds 2366ea0dfeb4SHugh Dickins /* 2367ea0dfeb4SHugh Dickins * What serializes the accesses to info->flags? 2368ea0dfeb4SHugh Dickins * ipc_lock_object() when called from shmctl_do_lock(), 2369ea0dfeb4SHugh Dickins * no serialization needed when called from shm_destroy(). 
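 *
 * Illustrative userspace path (a sketch, not part of this file): the
 * locking side is reached through the SysV shm API, roughly:
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	ends up in shmem_lock(file, 1, ucounts)
 *	shmctl(id, SHM_UNLOCK, NULL);	ends up in shmem_lock(file, 0, ucounts)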
2370ea0dfeb4SHugh Dickins */ 23711da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 2372d7c9e99aSAlexey Gladkov if (!user_shm_lock(inode->i_size, ucounts)) 23731da177e4SLinus Torvalds goto out_nomem; 23741da177e4SLinus Torvalds info->flags |= VM_LOCKED; 237589e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 23761da177e4SLinus Torvalds } 2377d7c9e99aSAlexey Gladkov if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2378d7c9e99aSAlexey Gladkov user_shm_unlock(inode->i_size, ucounts); 23791da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 238089e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 23811da177e4SLinus Torvalds } 23821da177e4SLinus Torvalds retval = 0; 238389e004eaSLee Schermerhorn 23841da177e4SLinus Torvalds out_nomem: 23851da177e4SLinus Torvalds return retval; 23861da177e4SLinus Torvalds } 23871da177e4SLinus Torvalds 23889b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 23891da177e4SLinus Torvalds { 2390d09e8ca6SPasha Tatashin struct inode *inode = file_inode(file); 2391d09e8ca6SPasha Tatashin struct shmem_inode_info *info = SHMEM_I(inode); 239222247efdSPeter Xu int ret; 2393ab3948f5SJoel Fernandes (Google) 239422247efdSPeter Xu ret = seal_check_future_write(info->seals, vma); 239522247efdSPeter Xu if (ret) 239622247efdSPeter Xu return ret; 2397ab3948f5SJoel Fernandes (Google) 239851b0bff2SCatalin Marinas /* arm64 - allow memory tagging on RAM-based files */ 23991c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MTE_ALLOWED); 240051b0bff2SCatalin Marinas 24011da177e4SLinus Torvalds file_accessed(file); 2402d09e8ca6SPasha Tatashin /* This is anonymous shared memory if it is unlinked at the time of mmap */ 2403d09e8ca6SPasha Tatashin if (inode->i_nlink) 24041da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2405d09e8ca6SPasha Tatashin else 2406d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops; 24071da177e4SLinus Torvalds return 0; 24081da177e4SLinus Torvalds } 24091da177e4SLinus Torvalds 2410e88e0d36SHugh Dickins static int shmem_file_open(struct inode *inode, struct file *file) 2411e88e0d36SHugh Dickins { 2412e88e0d36SHugh Dickins file->f_mode |= FMODE_CAN_ODIRECT; 2413e88e0d36SHugh Dickins return generic_file_open(inode, file); 2414e88e0d36SHugh Dickins } 2415e88e0d36SHugh Dickins 2416cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR 2417cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2418cb241339SHugh Dickins 2419cb241339SHugh Dickins /* 2420cb241339SHugh Dickins * chattr's fsflags are unrelated to extended attributes, 2421cb241339SHugh Dickins * but tmpfs has chosen to enable them under the same config option. 2422cb241339SHugh Dickins */ 2423cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2424e408e695STheodore Ts'o { 2425cb241339SHugh Dickins unsigned int i_flags = 0; 2426cb241339SHugh Dickins 2427cb241339SHugh Dickins if (fsflags & FS_NOATIME_FL) 2428cb241339SHugh Dickins i_flags |= S_NOATIME; 2429cb241339SHugh Dickins if (fsflags & FS_APPEND_FL) 2430cb241339SHugh Dickins i_flags |= S_APPEND; 2431cb241339SHugh Dickins if (fsflags & FS_IMMUTABLE_FL) 2432cb241339SHugh Dickins i_flags |= S_IMMUTABLE; 2433cb241339SHugh Dickins /* 2434cb241339SHugh Dickins * But FS_NODUMP_FL does not require any action in i_flags. 
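 *
 * Illustrative userspace view (a sketch, not part of this file): these
 * are the flags chattr(1) drives through the FS_IOC_GETFLAGS and
 * FS_IOC_SETFLAGS ioctls, e.g. to make a tmpfs file append-only:
 *
 *	unsigned int fl;
 *	ioctl(fd, FS_IOC_GETFLAGS, &fl);
 *	fl |= FS_APPEND_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &fl);	becomes S_APPEND below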
2435cb241339SHugh Dickins */ 2436cb241339SHugh Dickins inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); 2437e408e695STheodore Ts'o } 2438cb241339SHugh Dickins #else 2439cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2440cb241339SHugh Dickins { 2441cb241339SHugh Dickins } 2442cb241339SHugh Dickins #define shmem_initxattrs NULL 2443cb241339SHugh Dickins #endif 2444e408e695STheodore Ts'o 2445a2e45955SChuck Lever static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode) 2446a2e45955SChuck Lever { 2447a2e45955SChuck Lever return &SHMEM_I(inode)->dir_offsets; 2448a2e45955SChuck Lever } 2449a2e45955SChuck Lever 2450e09764cfSCarlos Maiolino static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, 2451e09764cfSCarlos Maiolino struct super_block *sb, 2452e09764cfSCarlos Maiolino struct inode *dir, umode_t mode, 2453e09764cfSCarlos Maiolino dev_t dev, unsigned long flags) 24541da177e4SLinus Torvalds { 24551da177e4SLinus Torvalds struct inode *inode; 24561da177e4SLinus Torvalds struct shmem_inode_info *info; 24571da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2458e809d5f0SChris Down ino_t ino; 245971480663SCarlos Maiolino int err; 24601da177e4SLinus Torvalds 246171480663SCarlos Maiolino err = shmem_reserve_inode(sb, &ino); 246271480663SCarlos Maiolino if (err) 246371480663SCarlos Maiolino return ERR_PTR(err); 246471480663SCarlos Maiolino 24651da177e4SLinus Torvalds 24661da177e4SLinus Torvalds inode = new_inode(sb); 246771480663SCarlos Maiolino if (!inode) { 24682daf18a7SHugh Dickins shmem_free_inode(sb, 0); 246971480663SCarlos Maiolino return ERR_PTR(-ENOSPC); 247071480663SCarlos Maiolino } 247171480663SCarlos Maiolino 2472e809d5f0SChris Down inode->i_ino = ino; 24737a80e5b8SGiuseppe Scrivano inode_init_owner(idmap, inode, dir, mode); 24741da177e4SLinus Torvalds inode->i_blocks = 0; 247565287334SJeff Layton inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); 2476a251c17aSJason A. Donenfeld inode->i_generation = get_random_u32(); 24771da177e4SLinus Torvalds info = SHMEM_I(inode); 24781da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 24791da177e4SLinus Torvalds spin_lock_init(&info->lock); 2480af53d3e9SHugh Dickins atomic_set(&info->stop_eviction, 0); 248140e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 24820b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2483f7cd16a5SXavier Roche info->i_crtime = inode->i_mtime; 2484e408e695STheodore Ts'o info->fsflags = (dir == NULL) ? 0 : 2485e408e695STheodore Ts'o SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; 2486cb241339SHugh Dickins if (info->fsflags) 2487cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags); 2488779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 24891da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 24912c6efe9cSLuis Chamberlain if (sbinfo->noswap) 24922c6efe9cSLuis Chamberlain mapping_set_unevictable(inode->i_mapping); 249338f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 249472c04902SAl Viro cache_no_acl(inode); 2495ff36da69SMatthew Wilcox (Oracle) mapping_set_large_folios(inode->i_mapping); 24961da177e4SLinus Torvalds 24971da177e4SLinus Torvalds switch (mode & S_IFMT) { 24981da177e4SLinus Torvalds default: 249939f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 25001da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 25011da177e4SLinus Torvalds break; 25021da177e4SLinus Torvalds case S_IFREG: 250314fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 25041da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 25051da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 250671fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 250771fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 25081da177e4SLinus Torvalds break; 25091da177e4SLinus Torvalds case S_IFDIR: 2510d8c76e6fSDave Hansen inc_nlink(inode); 25111da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 25121da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 25131da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 2514a2e45955SChuck Lever inode->i_fop = &simple_offset_dir_operations; 2515a2e45955SChuck Lever simple_offset_init(shmem_get_offset_ctx(inode)); 25161da177e4SLinus Torvalds break; 25171da177e4SLinus Torvalds case S_IFLNK: 25181da177e4SLinus Torvalds /* 25191da177e4SLinus Torvalds * Must not load anything in the rbtree, 25201da177e4SLinus Torvalds * mpol_free_shared_policy will not be called.
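 * (mpol_free_shared_policy() is skipped for symlinks on the inode
 * free path, so any rbtree node loaded here would simply leak: the
 * NULL mpol below keeps the tree empty.)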
25211da177e4SLinus Torvalds */ 252271fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 25231da177e4SLinus Torvalds break; 25241da177e4SLinus Torvalds } 2525b45d71fbSJoel Fernandes (Google) 2526b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode); 25271da177e4SLinus Torvalds return inode; 25281da177e4SLinus Torvalds } 25291da177e4SLinus Torvalds 2530e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA 2531e09764cfSCarlos Maiolino static struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2532e09764cfSCarlos Maiolino struct super_block *sb, struct inode *dir, 2533e09764cfSCarlos Maiolino umode_t mode, dev_t dev, unsigned long flags) 2534e09764cfSCarlos Maiolino { 2535e09764cfSCarlos Maiolino int err; 2536e09764cfSCarlos Maiolino struct inode *inode; 2537e09764cfSCarlos Maiolino 2538e09764cfSCarlos Maiolino inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2539e09764cfSCarlos Maiolino if (IS_ERR(inode)) 2540e09764cfSCarlos Maiolino return inode; 2541e09764cfSCarlos Maiolino 2542e09764cfSCarlos Maiolino err = dquot_initialize(inode); 2543e09764cfSCarlos Maiolino if (err) 2544e09764cfSCarlos Maiolino goto errout; 2545e09764cfSCarlos Maiolino 2546e09764cfSCarlos Maiolino err = dquot_alloc_inode(inode); 2547e09764cfSCarlos Maiolino if (err) { 2548e09764cfSCarlos Maiolino dquot_drop(inode); 2549e09764cfSCarlos Maiolino goto errout; 2550e09764cfSCarlos Maiolino } 2551e09764cfSCarlos Maiolino return inode; 2552e09764cfSCarlos Maiolino 2553e09764cfSCarlos Maiolino errout: 2554e09764cfSCarlos Maiolino inode->i_flags |= S_NOQUOTA; 2555e09764cfSCarlos Maiolino iput(inode); 2556e09764cfSCarlos Maiolino return ERR_PTR(err); 2557e09764cfSCarlos Maiolino } 2558e09764cfSCarlos Maiolino #else 2559e09764cfSCarlos Maiolino static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2560e09764cfSCarlos Maiolino struct super_block *sb, struct inode *dir, 2561e09764cfSCarlos Maiolino umode_t mode, dev_t dev, unsigned long flags) 2562e09764cfSCarlos Maiolino { 2563e09764cfSCarlos Maiolino return __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2564e09764cfSCarlos Maiolino } 2565e09764cfSCarlos Maiolino #endif /* CONFIG_TMPFS_QUOTA */ 2566e09764cfSCarlos Maiolino 25673460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD 256861c50040SAxel Rasmussen int shmem_mfill_atomic_pte(pmd_t *dst_pmd, 25694c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 25704c27fe4cSMike Rapoport unsigned long dst_addr, 25714c27fe4cSMike Rapoport unsigned long src_addr, 2572d9712937SAxel Rasmussen uffd_flags_t flags, 2573d7be6d7eSZhangPeng struct folio **foliop) 25744c27fe4cSMike Rapoport { 25754c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 25764c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 25774c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 25784c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 25794c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 25804c27fe4cSMike Rapoport void *page_kaddr; 2581b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio; 25824c27fe4cSMike Rapoport int ret; 25833460f6e5SAxel Rasmussen pgoff_t max_off; 25844c27fe4cSMike Rapoport 2585c7e263abSLukas Czerner if (shmem_inode_acct_block(inode, 1)) { 25867ed9d238SAxel Rasmussen /* 25877ed9d238SAxel Rasmussen * We may have got a page, returned -ENOENT triggering a retry, 25887ed9d238SAxel Rasmussen * and now we find ourselves with -ENOMEM. 
Release the page, to 25897ed9d238SAxel Rasmussen * avoid a BUG_ON in our caller. 25907ed9d238SAxel Rasmussen */ 2591d7be6d7eSZhangPeng if (unlikely(*foliop)) { 2592d7be6d7eSZhangPeng folio_put(*foliop); 2593d7be6d7eSZhangPeng *foliop = NULL; 25947ed9d238SAxel Rasmussen } 25957d64ae3aSAxel Rasmussen return -ENOMEM; 25967ed9d238SAxel Rasmussen } 25974c27fe4cSMike Rapoport 2598d7be6d7eSZhangPeng if (!*foliop) { 25997d64ae3aSAxel Rasmussen ret = -ENOMEM; 26007a7256d5SMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, pgoff); 26017a7256d5SMatthew Wilcox (Oracle) if (!folio) 26020f079694SMike Rapoport goto out_unacct_blocks; 26034c27fe4cSMike Rapoport 2604d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { 26057a7256d5SMatthew Wilcox (Oracle) page_kaddr = kmap_local_folio(folio, 0); 26065dc21f0cSIra Weiny /* 26075dc21f0cSIra Weiny * The read mmap_lock is held here. Despite the 26085dc21f0cSIra Weiny * mmap_lock being read recursive a deadlock is still 26095dc21f0cSIra Weiny * possible if a writer has taken a lock. For example: 26105dc21f0cSIra Weiny * 26115dc21f0cSIra Weiny * process A thread 1 takes read lock on own mmap_lock 26125dc21f0cSIra Weiny * process A thread 2 calls mmap, blocks taking write lock 26135dc21f0cSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock 26145dc21f0cSIra Weiny * process B thread 2 calls mmap, blocks taking write lock 26155dc21f0cSIra Weiny * process A thread 1 blocks taking read lock on process B 26165dc21f0cSIra Weiny * process B thread 1 blocks taking read lock on process A 26175dc21f0cSIra Weiny * 26185dc21f0cSIra Weiny * Disable page faults to prevent potential deadlock 26195dc21f0cSIra Weiny * and retry the copy outside the mmap_lock. 26205dc21f0cSIra Weiny */ 26215dc21f0cSIra Weiny pagefault_disable(); 26228d103963SMike Rapoport ret = copy_from_user(page_kaddr, 26238d103963SMike Rapoport (const void __user *)src_addr, 26244c27fe4cSMike Rapoport PAGE_SIZE); 26255dc21f0cSIra Weiny pagefault_enable(); 26267a7256d5SMatthew Wilcox (Oracle) kunmap_local(page_kaddr); 26274c27fe4cSMike Rapoport 2628c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */ 26294c27fe4cSMike Rapoport if (unlikely(ret)) { 2630d7be6d7eSZhangPeng *foliop = folio; 26317d64ae3aSAxel Rasmussen ret = -ENOENT; 26324c27fe4cSMike Rapoport /* don't free the page */ 26337d64ae3aSAxel Rasmussen goto out_unacct_blocks; 26344c27fe4cSMike Rapoport } 263519b482c2SMuchun Song 26367a7256d5SMatthew Wilcox (Oracle) flush_dcache_folio(folio); 26373460f6e5SAxel Rasmussen } else { /* ZEROPAGE */ 26387a7256d5SMatthew Wilcox (Oracle) clear_user_highpage(&folio->page, dst_addr); 26398d103963SMike Rapoport } 26404c27fe4cSMike Rapoport } else { 2641d7be6d7eSZhangPeng folio = *foliop; 26427a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2643d7be6d7eSZhangPeng *foliop = NULL; 26444c27fe4cSMike Rapoport } 26454c27fe4cSMike Rapoport 26467a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_locked(folio)); 26477a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_swapbacked(folio)); 26487a7256d5SMatthew Wilcox (Oracle) __folio_set_locked(folio); 26497a7256d5SMatthew Wilcox (Oracle) __folio_set_swapbacked(folio); 26507a7256d5SMatthew Wilcox (Oracle) __folio_mark_uptodate(folio); 26519cc90c66SAndrea Arcangeli 2652e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2653e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 26543460f6e5SAxel Rasmussen if (unlikely(pgoff >= max_off)) 
2655e2a50c1fSAndrea Arcangeli goto out_release; 2656e2a50c1fSAndrea Arcangeli 2657b7dd44a1SMatthew Wilcox (Oracle) ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, 265861c50040SAxel Rasmussen gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm); 26594c27fe4cSMike Rapoport if (ret) 26604c27fe4cSMike Rapoport goto out_release; 26614c27fe4cSMike Rapoport 266261c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, 2663d9712937SAxel Rasmussen &folio->page, true, flags); 26647d64ae3aSAxel Rasmussen if (ret) 26657d64ae3aSAxel Rasmussen goto out_delete_from_cache; 26664c27fe4cSMike Rapoport 26673c1b7528SHugh Dickins shmem_recalc_inode(inode, 1, 0); 26687a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 26697d64ae3aSAxel Rasmussen return 0; 26707d64ae3aSAxel Rasmussen out_delete_from_cache: 26717a7256d5SMatthew Wilcox (Oracle) filemap_remove_folio(folio); 26724c27fe4cSMike Rapoport out_release: 26737a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio); 26747a7256d5SMatthew Wilcox (Oracle) folio_put(folio); 26754c27fe4cSMike Rapoport out_unacct_blocks: 26760f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 26777d64ae3aSAxel Rasmussen return ret; 26784c27fe4cSMike Rapoport } 26793460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */ 26808d103963SMike Rapoport 26811da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 268292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 268369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 26841da177e4SLinus Torvalds 26851da177e4SLinus Torvalds static int 2686800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 26879d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 2688800d15a5SNick Piggin struct page **pagep, void **fsdata) 26891da177e4SLinus Torvalds { 2690800d15a5SNick Piggin struct inode *inode = mapping->host; 269140e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 269209cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2693eff1f906SMatthew Wilcox (Oracle) struct folio *folio; 2694a7605426SYang Shi int ret = 0; 269540e041a2SDavid Herrmann 26969608703eSJan Kara /* i_rwsem is held by caller */ 2697ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW | 2698ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2699ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 270040e041a2SDavid Herrmann return -EPERM; 270140e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 270240e041a2SDavid Herrmann return -EPERM; 270340e041a2SDavid Herrmann } 270440e041a2SDavid Herrmann 2705eff1f906SMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); 2706a7605426SYang Shi 2707a7605426SYang Shi if (ret) 2708a7605426SYang Shi return ret; 2709a7605426SYang Shi 2710eff1f906SMatthew Wilcox (Oracle) *pagep = folio_file_page(folio, index); 2711a7605426SYang Shi if (PageHWPoison(*pagep)) { 2712eff1f906SMatthew Wilcox (Oracle) folio_unlock(folio); 2713eff1f906SMatthew Wilcox (Oracle) folio_put(folio); 2714a7605426SYang Shi *pagep = NULL; 2715a7605426SYang Shi return -EIO; 2716a7605426SYang Shi } 2717a7605426SYang Shi 2718a7605426SYang Shi return 0; 2719800d15a5SNick Piggin } 2720800d15a5SNick Piggin 2721800d15a5SNick Piggin static int 2722800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2723800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2724800d15a5SNick Piggin struct page *page, void *fsdata) 2725800d15a5SNick Piggin { 272669bbb87bSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2727800d15a5SNick Piggin struct inode *inode = mapping->host; 2728800d15a5SNick Piggin 2729800d15a5SNick Piggin if (pos + copied > inode->i_size) 2730800d15a5SNick Piggin i_size_write(inode, pos + copied); 2731800d15a5SNick Piggin 273269bbb87bSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 273369bbb87bSMatthew Wilcox (Oracle) if (copied < folio_size(folio)) { 273469bbb87bSMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos); 273569bbb87bSMatthew Wilcox (Oracle) folio_zero_segments(folio, 0, from, 273669bbb87bSMatthew Wilcox (Oracle) from + copied, folio_size(folio)); 2737800d8c63SKirill A. Shutemov } 273869bbb87bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 2739800d8c63SKirill A. Shutemov } 274069bbb87bSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 274169bbb87bSMatthew Wilcox (Oracle) folio_unlock(folio); 274269bbb87bSMatthew Wilcox (Oracle) folio_put(folio); 2743d3602444SHugh Dickins 2744800d15a5SNick Piggin return copied; 27451da177e4SLinus Torvalds } 27461da177e4SLinus Torvalds 27472ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 27481da177e4SLinus Torvalds { 27496e58e79dSAl Viro struct file *file = iocb->ki_filp; 27506e58e79dSAl Viro struct inode *inode = file_inode(file); 27511da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 275241ffe5d5SHugh Dickins pgoff_t index; 275341ffe5d5SHugh Dickins unsigned long offset; 2754f7c1d074SGeert Uytterhoeven int error = 0; 2755cb66a7a1SAl Viro ssize_t retval = 0; 27566e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2757a0ee5ec5SHugh Dickins 275809cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 275909cbfeafSKirill A. 
Shutemov offset = *ppos & ~PAGE_MASK; 27601da177e4SLinus Torvalds 27611da177e4SLinus Torvalds for (;;) { 27624601e2fcSMatthew Wilcox (Oracle) struct folio *folio = NULL; 27631da177e4SLinus Torvalds struct page *page = NULL; 276441ffe5d5SHugh Dickins pgoff_t end_index; 276541ffe5d5SHugh Dickins unsigned long nr, ret; 27661da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 27671da177e4SLinus Torvalds 276809cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 27691da177e4SLinus Torvalds if (index > end_index) 27701da177e4SLinus Torvalds break; 27711da177e4SLinus Torvalds if (index == end_index) { 277209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 27731da177e4SLinus Torvalds if (nr <= offset) 27741da177e4SLinus Torvalds break; 27751da177e4SLinus Torvalds } 27761da177e4SLinus Torvalds 27774601e2fcSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, SGP_READ); 27786e58e79dSAl Viro if (error) { 27796e58e79dSAl Viro if (error == -EINVAL) 27806e58e79dSAl Viro error = 0; 27811da177e4SLinus Torvalds break; 27821da177e4SLinus Torvalds } 27834601e2fcSMatthew Wilcox (Oracle) if (folio) { 27844601e2fcSMatthew Wilcox (Oracle) folio_unlock(folio); 2785a7605426SYang Shi 27864601e2fcSMatthew Wilcox (Oracle) page = folio_file_page(folio, index); 2787a7605426SYang Shi if (PageHWPoison(page)) { 27884601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 2789a7605426SYang Shi error = -EIO; 2790a7605426SYang Shi break; 2791a7605426SYang Shi } 279275edd345SHugh Dickins } 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds /* 27951da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 27969608703eSJan Kara * are called without i_rwsem protection against truncate 27971da177e4SLinus Torvalds */ 279809cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 27991da177e4SLinus Torvalds i_size = i_size_read(inode); 280009cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 28011da177e4SLinus Torvalds if (index == end_index) { 280209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 28031da177e4SLinus Torvalds if (nr <= offset) { 28044601e2fcSMatthew Wilcox (Oracle) if (folio) 28054601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 28061da177e4SLinus Torvalds break; 28071da177e4SLinus Torvalds } 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds nr -= offset; 28101da177e4SLinus Torvalds 28114601e2fcSMatthew Wilcox (Oracle) if (folio) { 28121da177e4SLinus Torvalds /* 28131da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 28141da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 28151da177e4SLinus Torvalds * before reading the page on the kernel side. 28161da177e4SLinus Torvalds */ 28171da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 28181da177e4SLinus Torvalds flush_dcache_page(page); 28191da177e4SLinus Torvalds /* 28201da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 28211da177e4SLinus Torvalds */ 28221da177e4SLinus Torvalds if (!offset) 28234601e2fcSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 28241da177e4SLinus Torvalds /* 28251da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 28261da177e4SLinus Torvalds * now we can copy it to user space... 
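 * (When the folio is NULL the range is a hole: the branches below
 * copy from ZERO_PAGE(0) for user-backed iterators, and fall back
 * to iov_iter_zero() for pipes and other destinations.)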
28271da177e4SLinus Torvalds */ 28282ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 28294601e2fcSMatthew Wilcox (Oracle) folio_put(folio); 28301bdec44bSHugh Dickins 2831fcb14cb1SAl Viro } else if (user_backed_iter(to)) { 28321bdec44bSHugh Dickins /* 28331bdec44bSHugh Dickins * Copy to user tends to be so well optimized, but 28341bdec44bSHugh Dickins * clear_user() not so much, that it is noticeably 28351bdec44bSHugh Dickins * faster to copy the zero page instead of clearing. 28361bdec44bSHugh Dickins */ 28371bdec44bSHugh Dickins ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); 28381bdec44bSHugh Dickins } else { 28391bdec44bSHugh Dickins /* 28401bdec44bSHugh Dickins * But submitting the same page twice in a row to 28411bdec44bSHugh Dickins * splice() - or others? - can result in confusion: 28421bdec44bSHugh Dickins * so don't attempt that optimization on pipes etc. 28431bdec44bSHugh Dickins */ 28441bdec44bSHugh Dickins ret = iov_iter_zero(nr, to); 28451bdec44bSHugh Dickins } 28461bdec44bSHugh Dickins 28476e58e79dSAl Viro retval += ret; 28481da177e4SLinus Torvalds offset += ret; 284909cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 285009cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 28511da177e4SLinus Torvalds 28522ba5bbedSAl Viro if (!iov_iter_count(to)) 28531da177e4SLinus Torvalds break; 28546e58e79dSAl Viro if (ret < nr) { 28556e58e79dSAl Viro error = -EFAULT; 28566e58e79dSAl Viro break; 28576e58e79dSAl Viro } 28581da177e4SLinus Torvalds cond_resched(); 28591da177e4SLinus Torvalds } 28601da177e4SLinus Torvalds 286109cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 28626e58e79dSAl Viro file_accessed(file); 28636e58e79dSAl Viro return retval ? retval : error; 28641da177e4SLinus Torvalds } 28651da177e4SLinus Torvalds 2866e88e0d36SHugh Dickins static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2867e88e0d36SHugh Dickins { 2868e88e0d36SHugh Dickins struct file *file = iocb->ki_filp; 2869e88e0d36SHugh Dickins struct inode *inode = file->f_mapping->host; 2870e88e0d36SHugh Dickins ssize_t ret; 2871e88e0d36SHugh Dickins 2872e88e0d36SHugh Dickins inode_lock(inode); 2873e88e0d36SHugh Dickins ret = generic_write_checks(iocb, from); 2874e88e0d36SHugh Dickins if (ret <= 0) 2875e88e0d36SHugh Dickins goto unlock; 2876e88e0d36SHugh Dickins ret = file_remove_privs(file); 2877e88e0d36SHugh Dickins if (ret) 2878e88e0d36SHugh Dickins goto unlock; 2879e88e0d36SHugh Dickins ret = file_update_time(file); 2880e88e0d36SHugh Dickins if (ret) 2881e88e0d36SHugh Dickins goto unlock; 2882e88e0d36SHugh Dickins ret = generic_perform_write(iocb, from); 2883e88e0d36SHugh Dickins unlock: 2884e88e0d36SHugh Dickins inode_unlock(inode); 2885e88e0d36SHugh Dickins return ret; 2886e88e0d36SHugh Dickins } 2887e88e0d36SHugh Dickins 2888bd194b18SDavid Howells static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, 2889bd194b18SDavid Howells struct pipe_buffer *buf) 2890bd194b18SDavid Howells { 2891bd194b18SDavid Howells return true; 2892bd194b18SDavid Howells } 2893bd194b18SDavid Howells 2894bd194b18SDavid Howells static void zero_pipe_buf_release(struct pipe_inode_info *pipe, 2895bd194b18SDavid Howells struct pipe_buffer *buf) 2896bd194b18SDavid Howells { 2897bd194b18SDavid Howells } 2898bd194b18SDavid Howells 2899bd194b18SDavid Howells static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, 2900bd194b18SDavid Howells struct pipe_buffer *buf) 2901bd194b18SDavid Howells { 2902bd194b18SDavid Howells return false; 2903bd194b18SDavid 
Howells } 2904bd194b18SDavid Howells 2905bd194b18SDavid Howells static const struct pipe_buf_operations zero_pipe_buf_ops = { 2906bd194b18SDavid Howells .release = zero_pipe_buf_release, 2907bd194b18SDavid Howells .try_steal = zero_pipe_buf_try_steal, 2908bd194b18SDavid Howells .get = zero_pipe_buf_get, 2909bd194b18SDavid Howells }; 2910bd194b18SDavid Howells 2911bd194b18SDavid Howells static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, 2912bd194b18SDavid Howells loff_t fpos, size_t size) 2913bd194b18SDavid Howells { 2914bd194b18SDavid Howells size_t offset = fpos & ~PAGE_MASK; 2915bd194b18SDavid Howells 2916bd194b18SDavid Howells size = min_t(size_t, size, PAGE_SIZE - offset); 2917bd194b18SDavid Howells 2918bd194b18SDavid Howells if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { 2919bd194b18SDavid Howells struct pipe_buffer *buf = pipe_head_buf(pipe); 2920bd194b18SDavid Howells 2921bd194b18SDavid Howells *buf = (struct pipe_buffer) { 2922bd194b18SDavid Howells .ops = &zero_pipe_buf_ops, 2923bd194b18SDavid Howells .page = ZERO_PAGE(0), 2924bd194b18SDavid Howells .offset = offset, 2925bd194b18SDavid Howells .len = size, 2926bd194b18SDavid Howells }; 2927bd194b18SDavid Howells pipe->head++; 2928bd194b18SDavid Howells } 2929bd194b18SDavid Howells 2930bd194b18SDavid Howells return size; 2931bd194b18SDavid Howells } 2932bd194b18SDavid Howells 2933bd194b18SDavid Howells static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 2934bd194b18SDavid Howells struct pipe_inode_info *pipe, 2935bd194b18SDavid Howells size_t len, unsigned int flags) 2936bd194b18SDavid Howells { 2937bd194b18SDavid Howells struct inode *inode = file_inode(in); 2938bd194b18SDavid Howells struct address_space *mapping = inode->i_mapping; 2939bd194b18SDavid Howells struct folio *folio = NULL; 2940bd194b18SDavid Howells size_t total_spliced = 0, used, npages, n, part; 2941bd194b18SDavid Howells loff_t isize; 2942bd194b18SDavid Howells int error = 0; 2943bd194b18SDavid Howells 2944bd194b18SDavid Howells /* Work out how much data we can actually add into the pipe */ 2945bd194b18SDavid Howells used = pipe_occupancy(pipe->head, pipe->tail); 2946bd194b18SDavid Howells npages = max_t(ssize_t, pipe->max_usage - used, 0); 2947bd194b18SDavid Howells len = min_t(size_t, len, npages * PAGE_SIZE); 2948bd194b18SDavid Howells 2949bd194b18SDavid Howells do { 2950bd194b18SDavid Howells if (*ppos >= i_size_read(inode)) 2951bd194b18SDavid Howells break; 2952bd194b18SDavid Howells 2953fa598952SHugh Dickins error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, 2954fa598952SHugh Dickins SGP_READ); 2955bd194b18SDavid Howells if (error) { 2956bd194b18SDavid Howells if (error == -EINVAL) 2957bd194b18SDavid Howells error = 0; 2958bd194b18SDavid Howells break; 2959bd194b18SDavid Howells } 2960bd194b18SDavid Howells if (folio) { 2961bd194b18SDavid Howells folio_unlock(folio); 2962bd194b18SDavid Howells 2963fa598952SHugh Dickins if (folio_test_hwpoison(folio) || 2964fa598952SHugh Dickins (folio_test_large(folio) && 2965fa598952SHugh Dickins folio_test_has_hwpoisoned(folio))) { 2966bd194b18SDavid Howells error = -EIO; 2967bd194b18SDavid Howells break; 2968bd194b18SDavid Howells } 2969bd194b18SDavid Howells } 2970bd194b18SDavid Howells 2971bd194b18SDavid Howells /* 2972bd194b18SDavid Howells * i_size must be checked after we know the pages are Uptodate. 
2973bd194b18SDavid Howells * 2974bd194b18SDavid Howells * Checking i_size after the check allows us to calculate 2975bd194b18SDavid Howells * the correct value for "nr", which means the zero-filled 2976bd194b18SDavid Howells * part of the page is not copied back to userspace (unless 2977bd194b18SDavid Howells * another truncate extends the file - this is desired though). 2978bd194b18SDavid Howells */ 2979bd194b18SDavid Howells isize = i_size_read(inode); 2980bd194b18SDavid Howells if (unlikely(*ppos >= isize)) 2981bd194b18SDavid Howells break; 2982bd194b18SDavid Howells part = min_t(loff_t, isize - *ppos, len); 2983bd194b18SDavid Howells 2984bd194b18SDavid Howells if (folio) { 2985bd194b18SDavid Howells /* 2986bd194b18SDavid Howells * If users can be writing to this page using arbitrary 2987bd194b18SDavid Howells * virtual addresses, take care about potential aliasing 2988bd194b18SDavid Howells * before reading the page on the kernel side. 2989bd194b18SDavid Howells */ 2990bd194b18SDavid Howells if (mapping_writably_mapped(mapping)) 2991bd194b18SDavid Howells flush_dcache_folio(folio); 2992bd194b18SDavid Howells folio_mark_accessed(folio); 2993bd194b18SDavid Howells /* 2994bd194b18SDavid Howells * Ok, we have the page, and it's up-to-date, so we can 2995bd194b18SDavid Howells * now splice it into the pipe. 2996bd194b18SDavid Howells */ 2997bd194b18SDavid Howells n = splice_folio_into_pipe(pipe, folio, *ppos, part); 2998bd194b18SDavid Howells folio_put(folio); 2999bd194b18SDavid Howells folio = NULL; 3000bd194b18SDavid Howells } else { 3001fa598952SHugh Dickins n = splice_zeropage_into_pipe(pipe, *ppos, part); 3002bd194b18SDavid Howells } 3003bd194b18SDavid Howells 3004bd194b18SDavid Howells if (!n) 3005bd194b18SDavid Howells break; 3006bd194b18SDavid Howells len -= n; 3007bd194b18SDavid Howells total_spliced += n; 3008bd194b18SDavid Howells *ppos += n; 3009bd194b18SDavid Howells in->f_ra.prev_pos = *ppos; 3010bd194b18SDavid Howells if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 3011bd194b18SDavid Howells break; 3012bd194b18SDavid Howells 3013bd194b18SDavid Howells cond_resched(); 3014bd194b18SDavid Howells } while (len); 3015bd194b18SDavid Howells 3016bd194b18SDavid Howells if (folio) 3017bd194b18SDavid Howells folio_put(folio); 3018bd194b18SDavid Howells 3019bd194b18SDavid Howells file_accessed(in); 3020bd194b18SDavid Howells return total_spliced ? 
total_spliced : error; 3021bd194b18SDavid Howells } 3022bd194b18SDavid Howells 3023965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3024220f2ac9SHugh Dickins { 3025220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 3026220f2ac9SHugh Dickins struct inode *inode = mapping->host; 3027220f2ac9SHugh Dickins 3028965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 3029965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 3030220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 303141139aa4SMatthew Wilcox (Oracle) if (offset < 0) 303241139aa4SMatthew Wilcox (Oracle) return -ENXIO; 303341139aa4SMatthew Wilcox (Oracle) 30345955102cSAl Viro inode_lock(inode); 30359608703eSJan Kara /* We're holding i_rwsem so we can access i_size directly */ 303641139aa4SMatthew Wilcox (Oracle) offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 3037387aae6fSHugh Dickins if (offset >= 0) 303846a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 30395955102cSAl Viro inode_unlock(inode); 3040220f2ac9SHugh Dickins return offset; 3041220f2ac9SHugh Dickins } 3042220f2ac9SHugh Dickins 304383e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 304483e4fa9cSHugh Dickins loff_t len) 304583e4fa9cSHugh Dickins { 3046496ad9aaSAl Viro struct inode *inode = file_inode(file); 3047e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 304840e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 30491aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 3050d144bf62SHugh Dickins pgoff_t start, index, end, undo_fallocend; 3051e2d12e22SHugh Dickins int error; 305283e4fa9cSHugh Dickins 305313ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 305413ace4d0SHugh Dickins return -EOPNOTSUPP; 305513ace4d0SHugh Dickins 30565955102cSAl Viro inode_lock(inode); 305783e4fa9cSHugh Dickins 305883e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 305983e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 306083e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 306183e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 30628e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 306383e4fa9cSHugh Dickins 30649608703eSJan Kara /* protected by i_rwsem */ 3065ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 306640e041a2SDavid Herrmann error = -EPERM; 306740e041a2SDavid Herrmann goto out; 306840e041a2SDavid Herrmann } 306940e041a2SDavid Herrmann 30708e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 3071aa71ecd8SChen Jun shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 3072f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3073f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 3074f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 3075f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 3076f00cdc6dSHugh Dickins 307783e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 307883e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 307983e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 308083e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 308183e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 30828e205f77SHugh Dickins 30838e205f77SHugh Dickins 
spin_lock(&inode->i_lock); 30848e205f77SHugh Dickins inode->i_private = NULL; 30858e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 30862055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 30878e205f77SHugh Dickins spin_unlock(&inode->i_lock); 308883e4fa9cSHugh Dickins error = 0; 30898e205f77SHugh Dickins goto out; 309083e4fa9cSHugh Dickins } 309183e4fa9cSHugh Dickins 3092e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3093e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 3094e2d12e22SHugh Dickins if (error) 3095e2d12e22SHugh Dickins goto out; 3096e2d12e22SHugh Dickins 309740e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 309840e041a2SDavid Herrmann error = -EPERM; 309940e041a2SDavid Herrmann goto out; 310040e041a2SDavid Herrmann } 310140e041a2SDavid Herrmann 310209cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 310309cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3104e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 3105e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3106e2d12e22SHugh Dickins error = -ENOSPC; 3107e2d12e22SHugh Dickins goto out; 3108e2d12e22SHugh Dickins } 3109e2d12e22SHugh Dickins 31108e205f77SHugh Dickins shmem_falloc.waitq = NULL; 31111aac1400SHugh Dickins shmem_falloc.start = start; 31121aac1400SHugh Dickins shmem_falloc.next = start; 31131aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 31141aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 31151aac1400SHugh Dickins spin_lock(&inode->i_lock); 31161aac1400SHugh Dickins inode->i_private = &shmem_falloc; 31171aac1400SHugh Dickins spin_unlock(&inode->i_lock); 31181aac1400SHugh Dickins 3119d144bf62SHugh Dickins /* 3120d144bf62SHugh Dickins * info->fallocend is only relevant when huge pages might be 3121d144bf62SHugh Dickins * involved: to prevent split_huge_page() freeing fallocated 3122d144bf62SHugh Dickins * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. 3123d144bf62SHugh Dickins */ 3124d144bf62SHugh Dickins undo_fallocend = info->fallocend; 3125d144bf62SHugh Dickins if (info->fallocend < end) 3126d144bf62SHugh Dickins info->fallocend = end; 3127d144bf62SHugh Dickins 3128050dcb5cSHugh Dickins for (index = start; index < end; ) { 3129b0802b22SMatthew Wilcox (Oracle) struct folio *folio; 3130e2d12e22SHugh Dickins 3131e2d12e22SHugh Dickins /* 3132e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 3133e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 3134e2d12e22SHugh Dickins */ 3135e2d12e22SHugh Dickins if (signal_pending(current)) 3136e2d12e22SHugh Dickins error = -EINTR; 31371aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 31381aac1400SHugh Dickins error = -ENOMEM; 3139e2d12e22SHugh Dickins else 3140b0802b22SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, 3141b0802b22SMatthew Wilcox (Oracle) SGP_FALLOC); 3142e2d12e22SHugh Dickins if (error) { 3143d144bf62SHugh Dickins info->fallocend = undo_fallocend; 3144b0802b22SMatthew Wilcox (Oracle) /* Remove the !uptodate folios we added */ 31457f556567SHugh Dickins if (index > start) { 31461635f6a7SHugh Dickins shmem_undo_range(inode, 314709cbfeafSKirill A. 
Shutemov (loff_t)start << PAGE_SHIFT, 3148b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 31497f556567SHugh Dickins } 31501aac1400SHugh Dickins goto undone; 3151e2d12e22SHugh Dickins } 3152e2d12e22SHugh Dickins 3153050dcb5cSHugh Dickins /* 3154050dcb5cSHugh Dickins * Here is a more important optimization than it appears: 3155b0802b22SMatthew Wilcox (Oracle) * a second SGP_FALLOC on the same large folio will clear it, 3156b0802b22SMatthew Wilcox (Oracle) * making it uptodate and un-undoable if we fail later. 3157050dcb5cSHugh Dickins */ 3158b0802b22SMatthew Wilcox (Oracle) index = folio_next_index(folio); 3159050dcb5cSHugh Dickins /* Beware 32-bit wraparound */ 3160050dcb5cSHugh Dickins if (!index) 3161050dcb5cSHugh Dickins index--; 3162050dcb5cSHugh Dickins 3163e2d12e22SHugh Dickins /* 31641aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 31651aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 31661aac1400SHugh Dickins */ 3167b0802b22SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) 3168050dcb5cSHugh Dickins shmem_falloc.nr_falloced += index - shmem_falloc.next; 3169050dcb5cSHugh Dickins shmem_falloc.next = index; 31701aac1400SHugh Dickins 31711aac1400SHugh Dickins /* 3172b0802b22SMatthew Wilcox (Oracle) * If !uptodate, leave it that way so that freeable folios 31731635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 3174b0802b22SMatthew Wilcox (Oracle) * But mark it dirty so that memory pressure will swap rather 3175b0802b22SMatthew Wilcox (Oracle) * than free the folios we are allocating (and SGP_CACHE folios 3176e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 3177e2d12e22SHugh Dickins */ 3178b0802b22SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 3179b0802b22SMatthew Wilcox (Oracle) folio_unlock(folio); 3180b0802b22SMatthew Wilcox (Oracle) folio_put(folio); 3181e2d12e22SHugh Dickins cond_resched(); 3182e2d12e22SHugh Dickins } 3183e2d12e22SHugh Dickins 3184e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3185e2d12e22SHugh Dickins i_size_write(inode, offset + len); 31861aac1400SHugh Dickins undone: 31871aac1400SHugh Dickins spin_lock(&inode->i_lock); 31881aac1400SHugh Dickins inode->i_private = NULL; 31891aac1400SHugh Dickins spin_unlock(&inode->i_lock); 3190e2d12e22SHugh Dickins out: 319115f242bbSHugh Dickins if (!error) 319215f242bbSHugh Dickins file_modified(file); 31935955102cSAl Viro inode_unlock(inode); 319483e4fa9cSHugh Dickins return error; 319583e4fa9cSHugh Dickins } 319683e4fa9cSHugh Dickins 3197726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 31981da177e4SLinus Torvalds { 3199726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 32001da177e4SLinus Torvalds 32011da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 320209cbfeafSKirill A. 
Shutemov buf->f_bsize = PAGE_SIZE; 32031da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 32040edd73b3SHugh Dickins if (sbinfo->max_blocks) { 32051da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 320641ffe5d5SHugh Dickins buf->f_bavail = 320741ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 320841ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 32090edd73b3SHugh Dickins } 32100edd73b3SHugh Dickins if (sbinfo->max_inodes) { 32111da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 3212e07c469eSHugh Dickins buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; 32131da177e4SLinus Torvalds } 32141da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 321559cda49eSAmir Goldstein 321659cda49eSAmir Goldstein buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 321759cda49eSAmir Goldstein 32181da177e4SLinus Torvalds return 0; 32191da177e4SLinus Torvalds } 32201da177e4SLinus Torvalds 32211da177e4SLinus Torvalds /* 32221da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 32231da177e4SLinus Torvalds */ 32241da177e4SLinus Torvalds static int 32255ebb29beSChristian Brauner shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, 3226549c7297SChristian Brauner struct dentry *dentry, umode_t mode, dev_t dev) 32271da177e4SLinus Torvalds { 32280b0a0806SHugh Dickins struct inode *inode; 322971480663SCarlos Maiolino int error; 32301da177e4SLinus Torvalds 32317a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); 323271480663SCarlos Maiolino if (IS_ERR(inode)) 323371480663SCarlos Maiolino return PTR_ERR(inode); 323471480663SCarlos Maiolino 3235feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 3236feda821eSChristoph Hellwig if (error) 3237feda821eSChristoph Hellwig goto out_iput; 32382a7dba39SEric Paris error = security_inode_init_security(inode, dir, 32399d8f13baSMimi Zohar &dentry->d_name, 32406d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3241feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 3242feda821eSChristoph Hellwig goto out_iput; 324337ec43cdSMimi Zohar 3244a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3245a2e45955SChuck Lever if (error) 3246a2e45955SChuck Lever goto out_iput; 3247a2e45955SChuck Lever 32481da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 324965287334SJeff Layton dir->i_mtime = inode_set_ctime_current(dir); 325036f05cabSJeff Layton inode_inc_iversion(dir); 32511da177e4SLinus Torvalds d_instantiate(dentry, inode); 32521da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 32531da177e4SLinus Torvalds return error; 325471480663SCarlos Maiolino 3255feda821eSChristoph Hellwig out_iput: 3256feda821eSChristoph Hellwig iput(inode); 3257feda821eSChristoph Hellwig return error; 32581da177e4SLinus Torvalds } 32591da177e4SLinus Torvalds 326060545d0dSAl Viro static int 3261011e2b71SChristian Brauner shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 3262863f144fSMiklos Szeredi struct file *file, umode_t mode) 326360545d0dSAl Viro { 326460545d0dSAl Viro struct inode *inode; 326571480663SCarlos Maiolino int error; 326660545d0dSAl Viro 32677a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); 326871480663SCarlos Maiolino 326971480663SCarlos Maiolino if (IS_ERR(inode)) { 327071480663SCarlos Maiolino error = PTR_ERR(inode); 327171480663SCarlos Maiolino goto err_out; 327271480663SCarlos Maiolino } 327371480663SCarlos Maiolino 
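	/*
	 * Illustrative userspace trigger (a sketch, not part of this file):
	 * an unnamed tmpfs inode is created by open(2) with O_TMPFILE:
	 *
	 *	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
	 *
	 * The file gains a name only if linkat(2) adds one later; see the
	 * O_TMPFILE note in shmem_link() below.
	 */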
327460545d0dSAl Viro error = security_inode_init_security(inode, dir, 327560545d0dSAl Viro NULL, 327660545d0dSAl Viro shmem_initxattrs, NULL); 3277feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 3278feda821eSChristoph Hellwig goto out_iput; 3279feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 3280feda821eSChristoph Hellwig if (error) 3281feda821eSChristoph Hellwig goto out_iput; 3282863f144fSMiklos Szeredi d_tmpfile(file, inode); 328371480663SCarlos Maiolino 328471480663SCarlos Maiolino err_out: 3285863f144fSMiklos Szeredi return finish_open_simple(file, error); 3286feda821eSChristoph Hellwig out_iput: 3287feda821eSChristoph Hellwig iput(inode); 3288feda821eSChristoph Hellwig return error; 328960545d0dSAl Viro } 329060545d0dSAl Viro 3291c54bd91eSChristian Brauner static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, 3292549c7297SChristian Brauner struct dentry *dentry, umode_t mode) 32931da177e4SLinus Torvalds { 32941da177e4SLinus Torvalds int error; 32951da177e4SLinus Torvalds 32967a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); 32977a80e5b8SGiuseppe Scrivano if (error) 32981da177e4SLinus Torvalds return error; 3299d8c76e6fSDave Hansen inc_nlink(dir); 33001da177e4SLinus Torvalds return 0; 33011da177e4SLinus Torvalds } 33021da177e4SLinus Torvalds 33036c960e68SChristian Brauner static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, 3304549c7297SChristian Brauner struct dentry *dentry, umode_t mode, bool excl) 33051da177e4SLinus Torvalds { 33067a80e5b8SGiuseppe Scrivano return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0); 33071da177e4SLinus Torvalds } 33081da177e4SLinus Torvalds 33091da177e4SLinus Torvalds /* 33101da177e4SLinus Torvalds * Link a file.. 33111da177e4SLinus Torvalds */ 33121da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 33131da177e4SLinus Torvalds { 331475c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 331529b00e60SDarrick J. Wong int ret = 0; 33161da177e4SLinus Torvalds 33171da177e4SLinus Torvalds /* 33181da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 33191da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 33201da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 33211062af92SDarrick J. Wong * But if an O_TMPFILE file is linked into the tmpfs, the 33221062af92SDarrick J. Wong * first link must skip that, to get the accounting right. 33231da177e4SLinus Torvalds */ 33241062af92SDarrick J. Wong if (inode->i_nlink) { 3325e809d5f0SChris Down ret = shmem_reserve_inode(inode->i_sb, NULL); 33265b04c689SPavel Emelyanov if (ret) 33275b04c689SPavel Emelyanov goto out; 33281062af92SDarrick J. 
Wong } 33291da177e4SLinus Torvalds 3330a2e45955SChuck Lever ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3331a2e45955SChuck Lever if (ret) { 3332a2e45955SChuck Lever if (inode->i_nlink) 33332daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, 0); 3334a2e45955SChuck Lever goto out; 3335a2e45955SChuck Lever } 3336a2e45955SChuck Lever 33371da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 333865287334SJeff Layton dir->i_mtime = inode_set_ctime_to_ts(dir, 333965287334SJeff Layton inode_set_ctime_current(inode)); 334036f05cabSJeff Layton inode_inc_iversion(dir); 3341d8c76e6fSDave Hansen inc_nlink(inode); 33427de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 33431da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 33441da177e4SLinus Torvalds d_instantiate(dentry, inode); 33455b04c689SPavel Emelyanov out: 33465b04c689SPavel Emelyanov return ret; 33471da177e4SLinus Torvalds } 33481da177e4SLinus Torvalds 33491da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 33501da177e4SLinus Torvalds { 335175c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 33521da177e4SLinus Torvalds 33535b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 33542daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, 0); 33551da177e4SLinus Torvalds 3356a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(dir), dentry); 33571da177e4SLinus Torvalds 33581da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 335965287334SJeff Layton dir->i_mtime = inode_set_ctime_to_ts(dir, 336065287334SJeff Layton inode_set_ctime_current(inode)); 336136f05cabSJeff Layton inode_inc_iversion(dir); 33629a53c3a7SDave Hansen drop_nlink(inode); 33631da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 33641da177e4SLinus Torvalds return 0; 33651da177e4SLinus Torvalds } 33661da177e4SLinus Torvalds 33671da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 33681da177e4SLinus Torvalds { 33691da177e4SLinus Torvalds if (!simple_empty(dentry)) 33701da177e4SLinus Torvalds return -ENOTEMPTY; 33711da177e4SLinus Torvalds 337275c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 33739a53c3a7SDave Hansen drop_nlink(dir); 33741da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 33751da177e4SLinus Torvalds } 33761da177e4SLinus Torvalds 3377e18275aeSChristian Brauner static int shmem_whiteout(struct mnt_idmap *idmap, 3378549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry) 337946fdb794SMiklos Szeredi { 338046fdb794SMiklos Szeredi struct dentry *whiteout; 338146fdb794SMiklos Szeredi int error; 338246fdb794SMiklos Szeredi 338346fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 338446fdb794SMiklos Szeredi if (!whiteout) 338546fdb794SMiklos Szeredi return -ENOMEM; 338646fdb794SMiklos Szeredi 33877a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, old_dir, whiteout, 338846fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 338946fdb794SMiklos Szeredi dput(whiteout); 339046fdb794SMiklos Szeredi if (error) 339146fdb794SMiklos Szeredi return error; 339246fdb794SMiklos Szeredi 339346fdb794SMiklos Szeredi /* 339446fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 339546fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 
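	 *
	 * (A whiteout here is nothing more than a character device node
	 * created with WHITEOUT_MODE and WHITEOUT_DEV, i.e. device
	 * number 0:0, which union filesystems such as overlayfs read as
	 * "this name has been deleted in the upper layer".)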
339646fdb794SMiklos Szeredi * 339746fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 339846fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 339946fdb794SMiklos Szeredi */ 340046fdb794SMiklos Szeredi d_rehash(whiteout); 340146fdb794SMiklos Szeredi return 0; 340246fdb794SMiklos Szeredi } 340346fdb794SMiklos Szeredi 34041da177e4SLinus Torvalds /* 34051da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename, 34061da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 34071da177e4SLinus Torvalds * it exists so that the VFS layer correctly free's it when it 34081da177e4SLinus Torvalds * gets overwritten. 34091da177e4SLinus Torvalds */ 3410e18275aeSChristian Brauner static int shmem_rename2(struct mnt_idmap *idmap, 3411549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry, 3412549c7297SChristian Brauner struct inode *new_dir, struct dentry *new_dentry, 3413549c7297SChristian Brauner unsigned int flags) 34141da177e4SLinus Torvalds { 341575c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 34161da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 3417a2e45955SChuck Lever int error; 34181da177e4SLinus Torvalds 341946fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 34203b69ff51SMiklos Szeredi return -EINVAL; 34213b69ff51SMiklos Szeredi 342237456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 3423a2e45955SChuck Lever return simple_offset_rename_exchange(old_dir, old_dentry, 3424a2e45955SChuck Lever new_dir, new_dentry); 342537456771SMiklos Szeredi 34261da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 34271da177e4SLinus Torvalds return -ENOTEMPTY; 34281da177e4SLinus Torvalds 342946fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 34307a80e5b8SGiuseppe Scrivano error = shmem_whiteout(idmap, old_dir, old_dentry); 343146fdb794SMiklos Szeredi if (error) 343246fdb794SMiklos Szeredi return error; 343346fdb794SMiklos Szeredi } 343446fdb794SMiklos Szeredi 3435a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry); 3436a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry); 3437a2e45955SChuck Lever if (error) 3438a2e45955SChuck Lever return error; 3439a2e45955SChuck Lever 344075c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 34411da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3442b928095bSMiklos Szeredi if (they_are_dirs) { 344375c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 34449a53c3a7SDave Hansen drop_nlink(old_dir); 3445b928095bSMiklos Szeredi } 34461da177e4SLinus Torvalds } else if (they_are_dirs) { 34479a53c3a7SDave Hansen drop_nlink(old_dir); 3448d8c76e6fSDave Hansen inc_nlink(new_dir); 34491da177e4SLinus Torvalds } 34501da177e4SLinus Torvalds 34511da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 34521da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 3453944d0d9dSJeff Layton simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 345436f05cabSJeff Layton inode_inc_iversion(old_dir); 345536f05cabSJeff Layton inode_inc_iversion(new_dir); 34561da177e4SLinus Torvalds return 0; 34571da177e4SLinus Torvalds } 34581da177e4SLinus Torvalds 34597a77db95SChristian Brauner static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, 3460549c7297SChristian Brauner struct dentry *dentry, const char *symname) 34611da177e4SLinus Torvalds { 34621da177e4SLinus 
Torvalds int error; 34631da177e4SLinus Torvalds int len; 34641da177e4SLinus Torvalds struct inode *inode; 34657ad0414bSMatthew Wilcox (Oracle) struct folio *folio; 34661da177e4SLinus Torvalds 34671da177e4SLinus Torvalds len = strlen(symname) + 1; 346809cbfeafSKirill A. Shutemov if (len > PAGE_SIZE) 34691da177e4SLinus Torvalds return -ENAMETOOLONG; 34701da177e4SLinus Torvalds 34717a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, 34720825a6f9SJoe Perches VM_NORESERVE); 347371480663SCarlos Maiolino 347471480663SCarlos Maiolino if (IS_ERR(inode)) 347571480663SCarlos Maiolino return PTR_ERR(inode); 34761da177e4SLinus Torvalds 34779d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 34786d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 347923a31d87SChuck Lever if (error && error != -EOPNOTSUPP) 348023a31d87SChuck Lever goto out_iput; 3481570bc1c2SStephen Smalley 3482a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3483a2e45955SChuck Lever if (error) 3484a2e45955SChuck Lever goto out_iput; 34851da177e4SLinus Torvalds 34861da177e4SLinus Torvalds inode->i_size = len-1; 348769f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 34883ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 34893ed47db3SAl Viro if (!inode->i_link) { 349023a31d87SChuck Lever error = -ENOMEM; 3491a2e45955SChuck Lever goto out_remove_offset; 349269f07ec9SHugh Dickins } 349369f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 34941da177e4SLinus Torvalds } else { 3495e8ecde25SAl Viro inode_nohighmem(inode); 34967ad0414bSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); 349723a31d87SChuck Lever if (error) 3498a2e45955SChuck Lever goto out_remove_offset; 349914fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 35001da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 35017ad0414bSMatthew Wilcox (Oracle) memcpy(folio_address(folio), symname, len); 35027ad0414bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 35037ad0414bSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 35047ad0414bSMatthew Wilcox (Oracle) folio_unlock(folio); 35057ad0414bSMatthew Wilcox (Oracle) folio_put(folio); 35061da177e4SLinus Torvalds } 35071da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 350865287334SJeff Layton dir->i_mtime = inode_set_ctime_current(dir); 350936f05cabSJeff Layton inode_inc_iversion(dir); 35101da177e4SLinus Torvalds d_instantiate(dentry, inode); 35111da177e4SLinus Torvalds dget(dentry); 35121da177e4SLinus Torvalds return 0; 3513a2e45955SChuck Lever 3514a2e45955SChuck Lever out_remove_offset: 3515a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(dir), dentry); 351623a31d87SChuck Lever out_iput: 351723a31d87SChuck Lever iput(inode); 351823a31d87SChuck Lever return error; 35191da177e4SLinus Torvalds } 35201da177e4SLinus Torvalds 3521fceef393SAl Viro static void shmem_put_link(void *arg) 3522fceef393SAl Viro { 3523e4b57722SMatthew Wilcox (Oracle) folio_mark_accessed(arg); 3524e4b57722SMatthew Wilcox (Oracle) folio_put(arg); 3525fceef393SAl Viro } 3526fceef393SAl Viro 35276b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3528fceef393SAl Viro struct inode *inode, 3529fceef393SAl Viro struct delayed_call *done) 35301da177e4SLinus Torvalds { 3531e4b57722SMatthew Wilcox (Oracle) struct folio *folio = NULL; 35326b255391SAl Viro int error; 3533e4b57722SMatthew Wilcox (Oracle) 35346a6c9904SAl Viro if (!dentry) { 
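		/*
		 * A NULL dentry means we are in RCU-walk mode and must not
		 * sleep: look only at what is already in the page cache,
		 * and return -ECHILD so the VFS falls back to ref-walk if
		 * the symlink folio is absent, poisoned or not uptodate.
		 */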
3535e4b57722SMatthew Wilcox (Oracle) folio = filemap_get_folio(inode->i_mapping, 0); 353666dabbb6SChristoph Hellwig if (IS_ERR(folio)) 35376b255391SAl Viro return ERR_PTR(-ECHILD); 35387459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0)) || 3539e4b57722SMatthew Wilcox (Oracle) !folio_test_uptodate(folio)) { 3540e4b57722SMatthew Wilcox (Oracle) folio_put(folio); 35416a6c9904SAl Viro return ERR_PTR(-ECHILD); 35426a6c9904SAl Viro } 35436a6c9904SAl Viro } else { 3544e4b57722SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_READ); 3545680baacbSAl Viro if (error) 3546680baacbSAl Viro return ERR_PTR(error); 3547e4b57722SMatthew Wilcox (Oracle) if (!folio) 3548a7605426SYang Shi return ERR_PTR(-ECHILD); 35497459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0))) { 3550e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 3551e4b57722SMatthew Wilcox (Oracle) folio_put(folio); 3552a7605426SYang Shi return ERR_PTR(-ECHILD); 3553a7605426SYang Shi } 3554e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio); 35551da177e4SLinus Torvalds } 3556e4b57722SMatthew Wilcox (Oracle) set_delayed_call(done, shmem_put_link, folio); 3557e4b57722SMatthew Wilcox (Oracle) return folio_address(folio); 35581da177e4SLinus Torvalds } 35591da177e4SLinus Torvalds 3560b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3561e408e695STheodore Ts'o 3562e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa) 3563e408e695STheodore Ts'o { 3564e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3565e408e695STheodore Ts'o 3566e408e695STheodore Ts'o fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); 3567e408e695STheodore Ts'o 3568e408e695STheodore Ts'o return 0; 3569e408e695STheodore Ts'o } 3570e408e695STheodore Ts'o 35718782a9aeSChristian Brauner static int shmem_fileattr_set(struct mnt_idmap *idmap, 3572e408e695STheodore Ts'o struct dentry *dentry, struct fileattr *fa) 3573e408e695STheodore Ts'o { 3574e408e695STheodore Ts'o struct inode *inode = d_inode(dentry); 3575e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(inode); 3576e408e695STheodore Ts'o 3577e408e695STheodore Ts'o if (fileattr_has_fsx(fa)) 3578e408e695STheodore Ts'o return -EOPNOTSUPP; 3579cb241339SHugh Dickins if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) 3580cb241339SHugh Dickins return -EOPNOTSUPP; 3581e408e695STheodore Ts'o 3582e408e695STheodore Ts'o info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | 3583e408e695STheodore Ts'o (fa->flags & SHMEM_FL_USER_MODIFIABLE); 3584e408e695STheodore Ts'o 3585cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags); 358665287334SJeff Layton inode_set_ctime_current(inode); 358736f05cabSJeff Layton inode_inc_iversion(inode); 3588e408e695STheodore Ts'o return 0; 3589e408e695STheodore Ts'o } 3590e408e695STheodore Ts'o 3591b09e0fa4SEric Paris /* 3592b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3593b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3594b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3595b09e0fa4SEric Paris * filesystem level, though. 3596b09e0fa4SEric Paris */ 3597b09e0fa4SEric Paris 35986d9d88d0SJarkko Sakkinen /* 35996d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
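 *
 * The @xattr_array supplied by the LSM is terminated by an entry with
 * a NULL name, and each name arrives without the "security." prefix
 * (XATTR_SECURITY_PREFIX is prepended below).  On an inode-limited
 * superblock, the space the xattrs will occupy is charged against
 * free_ispace up front and handed back if allocation fails midway.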
36006d9d88d0SJarkko Sakkinen */ 36016d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 36026d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 36036d9d88d0SJarkko Sakkinen void *fs_info) 36046d9d88d0SJarkko Sakkinen { 36056d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 36062daf18a7SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 36076d9d88d0SJarkko Sakkinen const struct xattr *xattr; 360838f38657SAristeu Rozanski struct simple_xattr *new_xattr; 36092daf18a7SHugh Dickins size_t ispace = 0; 36106d9d88d0SJarkko Sakkinen size_t len; 36116d9d88d0SJarkko Sakkinen 36122daf18a7SHugh Dickins if (sbinfo->max_inodes) { 36132daf18a7SHugh Dickins for (xattr = xattr_array; xattr->name != NULL; xattr++) { 36142daf18a7SHugh Dickins ispace += simple_xattr_space(xattr->name, 36152daf18a7SHugh Dickins xattr->value_len + XATTR_SECURITY_PREFIX_LEN); 36162daf18a7SHugh Dickins } 36172daf18a7SHugh Dickins if (ispace) { 36182daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock); 36192daf18a7SHugh Dickins if (sbinfo->free_ispace < ispace) 36202daf18a7SHugh Dickins ispace = 0; 36212daf18a7SHugh Dickins else 36222daf18a7SHugh Dickins sbinfo->free_ispace -= ispace; 36232daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock); 36242daf18a7SHugh Dickins if (!ispace) 36252daf18a7SHugh Dickins return -ENOSPC; 36262daf18a7SHugh Dickins } 36272daf18a7SHugh Dickins } 36282daf18a7SHugh Dickins 36296d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 363038f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 36316d9d88d0SJarkko Sakkinen if (!new_xattr) 36322daf18a7SHugh Dickins break; 36336d9d88d0SJarkko Sakkinen 36346d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 36356d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3636572a3d1eSHugh Dickins GFP_KERNEL_ACCOUNT); 36376d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 36383bef735aSChengguang Xu kvfree(new_xattr); 36392daf18a7SHugh Dickins break; 36406d9d88d0SJarkko Sakkinen } 36416d9d88d0SJarkko Sakkinen 36426d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 36436d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 36446d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 36456d9d88d0SJarkko Sakkinen xattr->name, len); 36466d9d88d0SJarkko Sakkinen 36473b4c7bc0SChristian Brauner simple_xattr_add(&info->xattrs, new_xattr); 36486d9d88d0SJarkko Sakkinen } 36496d9d88d0SJarkko Sakkinen 36502daf18a7SHugh Dickins if (xattr->name != NULL) { 36512daf18a7SHugh Dickins if (ispace) { 36522daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock); 36532daf18a7SHugh Dickins sbinfo->free_ispace += ispace; 36542daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock); 36552daf18a7SHugh Dickins } 36562daf18a7SHugh Dickins simple_xattrs_free(&info->xattrs, NULL); 36572daf18a7SHugh Dickins return -ENOMEM; 36582daf18a7SHugh Dickins } 36592daf18a7SHugh Dickins 36606d9d88d0SJarkko Sakkinen return 0; 36616d9d88d0SJarkko Sakkinen } 36626d9d88d0SJarkko Sakkinen 3663aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3664b296821aSAl Viro struct dentry *unused, struct inode *inode, 3665b296821aSAl Viro const char *name, void *buffer, size_t size) 3666aa7c5241SAndreas Gruenbacher { 3667b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3668aa7c5241SAndreas Gruenbacher 3669aa7c5241SAndreas Gruenbacher name = 
xattr_full_name(handler, name); 3670aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3671aa7c5241SAndreas Gruenbacher } 3672aa7c5241SAndreas Gruenbacher 3673aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 367439f60c1cSChristian Brauner struct mnt_idmap *idmap, 367559301226SAl Viro struct dentry *unused, struct inode *inode, 367659301226SAl Viro const char *name, const void *value, 367759301226SAl Viro size_t size, int flags) 3678aa7c5241SAndreas Gruenbacher { 367959301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 36802daf18a7SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 36815de75970SHugh Dickins struct simple_xattr *old_xattr; 36822daf18a7SHugh Dickins size_t ispace = 0; 3683aa7c5241SAndreas Gruenbacher 3684aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 36852daf18a7SHugh Dickins if (value && sbinfo->max_inodes) { 36862daf18a7SHugh Dickins ispace = simple_xattr_space(name, size); 36872daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock); 36882daf18a7SHugh Dickins if (sbinfo->free_ispace < ispace) 36892daf18a7SHugh Dickins ispace = 0; 36902daf18a7SHugh Dickins else 36912daf18a7SHugh Dickins sbinfo->free_ispace -= ispace; 36922daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock); 36932daf18a7SHugh Dickins if (!ispace) 36942daf18a7SHugh Dickins return -ENOSPC; 36952daf18a7SHugh Dickins } 36962daf18a7SHugh Dickins 36975de75970SHugh Dickins old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); 36985de75970SHugh Dickins if (!IS_ERR(old_xattr)) { 36992daf18a7SHugh Dickins ispace = 0; 37002daf18a7SHugh Dickins if (old_xattr && sbinfo->max_inodes) 37012daf18a7SHugh Dickins ispace = simple_xattr_space(old_xattr->name, 37022daf18a7SHugh Dickins old_xattr->size); 37035de75970SHugh Dickins simple_xattr_free(old_xattr); 37045de75970SHugh Dickins old_xattr = NULL; 370565287334SJeff Layton inode_set_ctime_current(inode); 370636f05cabSJeff Layton inode_inc_iversion(inode); 370736f05cabSJeff Layton } 37082daf18a7SHugh Dickins if (ispace) { 37092daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock); 37102daf18a7SHugh Dickins sbinfo->free_ispace += ispace; 37112daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock); 37122daf18a7SHugh Dickins } 37135de75970SHugh Dickins return PTR_ERR(old_xattr); 3714aa7c5241SAndreas Gruenbacher } 3715aa7c5241SAndreas Gruenbacher 3716aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3717aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3718aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3719aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3720aa7c5241SAndreas Gruenbacher }; 3721aa7c5241SAndreas Gruenbacher 3722aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3723aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3724aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3725aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3726aa7c5241SAndreas Gruenbacher }; 3727aa7c5241SAndreas Gruenbacher 37282daf18a7SHugh Dickins static const struct xattr_handler shmem_user_xattr_handler = { 37292daf18a7SHugh Dickins .prefix = XATTR_USER_PREFIX, 37302daf18a7SHugh Dickins .get = shmem_xattr_handler_get, 37312daf18a7SHugh Dickins .set = shmem_xattr_handler_set, 37322daf18a7SHugh Dickins }; 37332daf18a7SHugh Dickins 3734b09e0fa4SEric Paris static const struct 
xattr_handler *shmem_xattr_handlers[] = { 3735aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3736aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 37372daf18a7SHugh Dickins &shmem_user_xattr_handler, 3738b09e0fa4SEric Paris NULL 3739b09e0fa4SEric Paris }; 3740b09e0fa4SEric Paris 3741b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3742b09e0fa4SEric Paris { 374375c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3744786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3745b09e0fa4SEric Paris } 3746b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3747b09e0fa4SEric Paris 374869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 3749f7cd16a5SXavier Roche .getattr = shmem_getattr, 3750e09764cfSCarlos Maiolino .setattr = shmem_setattr, 37516b255391SAl Viro .get_link = simple_get_link, 3752b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3753b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3754b09e0fa4SEric Paris #endif 37551da177e4SLinus Torvalds }; 37561da177e4SLinus Torvalds 375792e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 3758f7cd16a5SXavier Roche .getattr = shmem_getattr, 3759e09764cfSCarlos Maiolino .setattr = shmem_setattr, 37606b255391SAl Viro .get_link = shmem_get_link, 3761b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3762b09e0fa4SEric Paris .listxattr = shmem_listxattr, 376339f0247dSAndreas Gruenbacher #endif 3764b09e0fa4SEric Paris }; 376539f0247dSAndreas Gruenbacher 376691828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 376791828a40SDavid M. Grimes { 376891828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 376991828a40SDavid M. Grimes } 377091828a40SDavid M. Grimes 377191828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 377291828a40SDavid M. Grimes { 377391828a40SDavid M. Grimes __u32 *fh = vfh; 377491828a40SDavid M. Grimes __u64 inum = fh[2]; 377591828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 377691828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 377791828a40SDavid M. Grimes } 377891828a40SDavid M. Grimes 377912ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */ 378012ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode) 378112ba780dSAmir Goldstein { 378212ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode); 378312ba780dSAmir Goldstein 378412ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode); 378512ba780dSAmir Goldstein } 378612ba780dSAmir Goldstein 378712ba780dSAmir Goldstein 3788480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3789480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 379091828a40SDavid M. Grimes { 379191828a40SDavid M. Grimes struct inode *inode; 3792480b116cSChristoph Hellwig struct dentry *dentry = NULL; 379335c2a7f4SHugh Dickins u64 inum; 379491828a40SDavid M. Grimes 3795480b116cSChristoph Hellwig if (fh_len < 3) 3796480b116cSChristoph Hellwig return NULL; 3797480b116cSChristoph Hellwig 379835c2a7f4SHugh Dickins inum = fid->raw[2]; 379935c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 380035c2a7f4SHugh Dickins 3801480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3802480b116cSChristoph Hellwig shmem_match, fid->raw); 380391828a40SDavid M. 
Grimes if (inode) { 380412ba780dSAmir Goldstein dentry = shmem_find_alias(inode); 380591828a40SDavid M. Grimes iput(inode); 380691828a40SDavid M. Grimes } 380791828a40SDavid M. Grimes 3808480b116cSChristoph Hellwig return dentry; 380991828a40SDavid M. Grimes } 381091828a40SDavid M. Grimes 3811b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3812b0b0382bSAl Viro struct inode *parent) 381391828a40SDavid M. Grimes { 38145fe0c237SAneesh Kumar K.V if (*len < 3) { 38155fe0c237SAneesh Kumar K.V *len = 3; 381694e07a75SNamjae Jeon return FILEID_INVALID; 38175fe0c237SAneesh Kumar K.V } 381891828a40SDavid M. Grimes 38191d3382cbSAl Viro if (inode_unhashed(inode)) { 382091828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 382191828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 382291828a40SDavid M. Grimes * time, we need a lock to ensure we only try 382391828a40SDavid M. Grimes * to do it once 382491828a40SDavid M. Grimes */ 382591828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 382691828a40SDavid M. Grimes spin_lock(&lock); 38271d3382cbSAl Viro if (inode_unhashed(inode)) 382891828a40SDavid M. Grimes __insert_inode_hash(inode, 382991828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 383091828a40SDavid M. Grimes spin_unlock(&lock); 383191828a40SDavid M. Grimes } 383291828a40SDavid M. Grimes 383391828a40SDavid M. Grimes fh[0] = inode->i_generation; 383491828a40SDavid M. Grimes fh[1] = inode->i_ino; 383591828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 383691828a40SDavid M. Grimes 383791828a40SDavid M. Grimes *len = 3; 383891828a40SDavid M. Grimes return 1; 383991828a40SDavid M. Grimes } 384091828a40SDavid M. Grimes 384139655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 384291828a40SDavid M. Grimes .get_parent = shmem_get_parent, 384391828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3844480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 384591828a40SDavid M. Grimes }; 384691828a40SDavid M. 
Grimes 3847626c3920SAl Viro enum shmem_param { 3848626c3920SAl Viro Opt_gid, 3849626c3920SAl Viro Opt_huge, 3850626c3920SAl Viro Opt_mode, 3851626c3920SAl Viro Opt_mpol, 3852626c3920SAl Viro Opt_nr_blocks, 3853626c3920SAl Viro Opt_nr_inodes, 3854626c3920SAl Viro Opt_size, 3855626c3920SAl Viro Opt_uid, 3856ea3271f7SChris Down Opt_inode32, 3857ea3271f7SChris Down Opt_inode64, 38582c6efe9cSLuis Chamberlain Opt_noswap, 3859e09764cfSCarlos Maiolino Opt_quota, 3860e09764cfSCarlos Maiolino Opt_usrquota, 3861e09764cfSCarlos Maiolino Opt_grpquota, 3862de4c0e7cSLukas Czerner Opt_usrquota_block_hardlimit, 3863de4c0e7cSLukas Czerner Opt_usrquota_inode_hardlimit, 3864de4c0e7cSLukas Czerner Opt_grpquota_block_hardlimit, 3865de4c0e7cSLukas Czerner Opt_grpquota_inode_hardlimit, 3866626c3920SAl Viro }; 38671da177e4SLinus Torvalds 38685eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = { 38692710c957SAl Viro {"never", SHMEM_HUGE_NEVER }, 38702710c957SAl Viro {"always", SHMEM_HUGE_ALWAYS }, 38712710c957SAl Viro {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 38722710c957SAl Viro {"advise", SHMEM_HUGE_ADVISE }, 38732710c957SAl Viro {} 38742710c957SAl Viro }; 38752710c957SAl Viro 3876d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = { 3877626c3920SAl Viro fsparam_u32 ("gid", Opt_gid), 38782710c957SAl Viro fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 3879626c3920SAl Viro fsparam_u32oct("mode", Opt_mode), 3880626c3920SAl Viro fsparam_string("mpol", Opt_mpol), 3881626c3920SAl Viro fsparam_string("nr_blocks", Opt_nr_blocks), 3882626c3920SAl Viro fsparam_string("nr_inodes", Opt_nr_inodes), 3883626c3920SAl Viro fsparam_string("size", Opt_size), 3884626c3920SAl Viro fsparam_u32 ("uid", Opt_uid), 3885ea3271f7SChris Down fsparam_flag ("inode32", Opt_inode32), 3886ea3271f7SChris Down fsparam_flag ("inode64", Opt_inode64), 38872c6efe9cSLuis Chamberlain fsparam_flag ("noswap", Opt_noswap), 3888e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA 3889e09764cfSCarlos Maiolino fsparam_flag ("quota", Opt_quota), 3890e09764cfSCarlos Maiolino fsparam_flag ("usrquota", Opt_usrquota), 3891e09764cfSCarlos Maiolino fsparam_flag ("grpquota", Opt_grpquota), 3892de4c0e7cSLukas Czerner fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit), 3893de4c0e7cSLukas Czerner fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit), 3894de4c0e7cSLukas Czerner fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit), 3895de4c0e7cSLukas Czerner fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit), 3896e09764cfSCarlos Maiolino #endif 3897626c3920SAl Viro {} 3898626c3920SAl Viro }; 3899626c3920SAl Viro 3900f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 3901626c3920SAl Viro { 3902f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3903626c3920SAl Viro struct fs_parse_result result; 3904e04dc423SAl Viro unsigned long long size; 3905626c3920SAl Viro char *rest; 3906626c3920SAl Viro int opt; 39070200679fSChristian Brauner kuid_t kuid; 39080200679fSChristian Brauner kgid_t kgid; 3909626c3920SAl Viro 3910d7167b14SAl Viro opt = fs_parse(fc, shmem_fs_parameters, param, &result); 3911f3235626SDavid Howells if (opt < 0) 3912626c3920SAl Viro return opt; 3913626c3920SAl Viro 3914626c3920SAl Viro switch (opt) { 3915626c3920SAl Viro case Opt_size: 3916626c3920SAl Viro size = memparse(param->string, &rest); 3917e04dc423SAl Viro if (*rest == '%') { 3918e04dc423SAl Viro size 
<<= PAGE_SHIFT; 3919e04dc423SAl Viro size *= totalram_pages(); 3920e04dc423SAl Viro do_div(size, 100); 3921e04dc423SAl Viro rest++; 3922e04dc423SAl Viro } 3923e04dc423SAl Viro if (*rest) 3924626c3920SAl Viro goto bad_value; 3925e04dc423SAl Viro ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3926e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3927626c3920SAl Viro break; 3928626c3920SAl Viro case Opt_nr_blocks: 3929626c3920SAl Viro ctx->blocks = memparse(param->string, &rest); 3930e07c469eSHugh Dickins if (*rest || ctx->blocks > LONG_MAX) 3931626c3920SAl Viro goto bad_value; 3932e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3933626c3920SAl Viro break; 3934626c3920SAl Viro case Opt_nr_inodes: 3935626c3920SAl Viro ctx->inodes = memparse(param->string, &rest); 3936e07c469eSHugh Dickins if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) 3937626c3920SAl Viro goto bad_value; 3938e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_INODES; 3939626c3920SAl Viro break; 3940626c3920SAl Viro case Opt_mode: 3941626c3920SAl Viro ctx->mode = result.uint_32 & 07777; 3942626c3920SAl Viro break; 3943626c3920SAl Viro case Opt_uid: 39440200679fSChristian Brauner kuid = make_kuid(current_user_ns(), result.uint_32); 39450200679fSChristian Brauner if (!uid_valid(kuid)) 3946626c3920SAl Viro goto bad_value; 39470200679fSChristian Brauner 39480200679fSChristian Brauner /* 39490200679fSChristian Brauner * The requested uid must be representable in the 39500200679fSChristian Brauner * filesystem's idmapping. 39510200679fSChristian Brauner */ 39520200679fSChristian Brauner if (!kuid_has_mapping(fc->user_ns, kuid)) 39530200679fSChristian Brauner goto bad_value; 39540200679fSChristian Brauner 39550200679fSChristian Brauner ctx->uid = kuid; 3956626c3920SAl Viro break; 3957626c3920SAl Viro case Opt_gid: 39580200679fSChristian Brauner kgid = make_kgid(current_user_ns(), result.uint_32); 39590200679fSChristian Brauner if (!gid_valid(kgid)) 3960626c3920SAl Viro goto bad_value; 39610200679fSChristian Brauner 39620200679fSChristian Brauner /* 39630200679fSChristian Brauner * The requested gid must be representable in the 39640200679fSChristian Brauner * filesystem's idmapping. 
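			 * For example, gid=1000 on a mount created inside a
			 * user namespace with no mapping for gid 1000 must
			 * fail here rather than create inodes owned by an
			 * unrepresentable group.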
39650200679fSChristian Brauner */ 39660200679fSChristian Brauner if (!kgid_has_mapping(fc->user_ns, kgid)) 39670200679fSChristian Brauner goto bad_value; 39680200679fSChristian Brauner 39690200679fSChristian Brauner ctx->gid = kgid; 3970626c3920SAl Viro break; 3971626c3920SAl Viro case Opt_huge: 3972626c3920SAl Viro ctx->huge = result.uint_32; 3973626c3920SAl Viro if (ctx->huge != SHMEM_HUGE_NEVER && 3974396bcc52SMatthew Wilcox (Oracle) !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 3975626c3920SAl Viro has_transparent_hugepage())) 3976626c3920SAl Viro goto unsupported_parameter; 3977e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_HUGE; 3978626c3920SAl Viro break; 3979626c3920SAl Viro case Opt_mpol: 3980626c3920SAl Viro if (IS_ENABLED(CONFIG_NUMA)) { 3981e04dc423SAl Viro mpol_put(ctx->mpol); 3982e04dc423SAl Viro ctx->mpol = NULL; 3983626c3920SAl Viro if (mpol_parse_str(param->string, &ctx->mpol)) 3984626c3920SAl Viro goto bad_value; 3985626c3920SAl Viro break; 3986626c3920SAl Viro } 3987626c3920SAl Viro goto unsupported_parameter; 3988ea3271f7SChris Down case Opt_inode32: 3989ea3271f7SChris Down ctx->full_inums = false; 3990ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3991ea3271f7SChris Down break; 3992ea3271f7SChris Down case Opt_inode64: 3993ea3271f7SChris Down if (sizeof(ino_t) < 8) { 3994ea3271f7SChris Down return invalfc(fc, 3995ea3271f7SChris Down "Cannot use inode64 with <64bit inums in kernel\n"); 3996ea3271f7SChris Down } 3997ea3271f7SChris Down ctx->full_inums = true; 3998ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS; 3999ea3271f7SChris Down break; 40002c6efe9cSLuis Chamberlain case Opt_noswap: 400101106e14SChristian Brauner if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { 400201106e14SChristian Brauner return invalfc(fc, 400301106e14SChristian Brauner "Turning off swap in unprivileged tmpfs mounts unsupported"); 400401106e14SChristian Brauner } 40052c6efe9cSLuis Chamberlain ctx->noswap = true; 40062c6efe9cSLuis Chamberlain ctx->seen |= SHMEM_SEEN_NOSWAP; 40072c6efe9cSLuis Chamberlain break; 4008e09764cfSCarlos Maiolino case Opt_quota: 4009e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns) 4010e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4011e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA; 4012e09764cfSCarlos Maiolino ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); 4013e09764cfSCarlos Maiolino break; 4014e09764cfSCarlos Maiolino case Opt_usrquota: 4015e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns) 4016e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4017e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA; 4018e09764cfSCarlos Maiolino ctx->quota_types |= QTYPE_MASK_USR; 4019e09764cfSCarlos Maiolino break; 4020e09764cfSCarlos Maiolino case Opt_grpquota: 4021e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns) 4022e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4023e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA; 4024e09764cfSCarlos Maiolino ctx->quota_types |= QTYPE_MASK_GRP; 4025e09764cfSCarlos Maiolino break; 4026de4c0e7cSLukas Czerner case Opt_usrquota_block_hardlimit: 4027de4c0e7cSLukas Czerner size = memparse(param->string, &rest); 4028de4c0e7cSLukas Czerner if (*rest || !size) 4029de4c0e7cSLukas Czerner goto bad_value; 4030de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) 4031de4c0e7cSLukas Czerner return invalfc(fc, 4032de4c0e7cSLukas Czerner 
"User quota block hardlimit too large."); 4033de4c0e7cSLukas Czerner ctx->qlimits.usrquota_bhardlimit = size; 4034de4c0e7cSLukas Czerner break; 4035de4c0e7cSLukas Czerner case Opt_grpquota_block_hardlimit: 4036de4c0e7cSLukas Czerner size = memparse(param->string, &rest); 4037de4c0e7cSLukas Czerner if (*rest || !size) 4038de4c0e7cSLukas Czerner goto bad_value; 4039de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) 4040de4c0e7cSLukas Czerner return invalfc(fc, 4041de4c0e7cSLukas Czerner "Group quota block hardlimit too large."); 4042de4c0e7cSLukas Czerner ctx->qlimits.grpquota_bhardlimit = size; 4043de4c0e7cSLukas Czerner break; 4044de4c0e7cSLukas Czerner case Opt_usrquota_inode_hardlimit: 4045de4c0e7cSLukas Czerner size = memparse(param->string, &rest); 4046de4c0e7cSLukas Czerner if (*rest || !size) 4047de4c0e7cSLukas Czerner goto bad_value; 4048de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_INO_LIMIT) 4049de4c0e7cSLukas Czerner return invalfc(fc, 4050de4c0e7cSLukas Czerner "User quota inode hardlimit too large."); 4051de4c0e7cSLukas Czerner ctx->qlimits.usrquota_ihardlimit = size; 4052de4c0e7cSLukas Czerner break; 4053de4c0e7cSLukas Czerner case Opt_grpquota_inode_hardlimit: 4054de4c0e7cSLukas Czerner size = memparse(param->string, &rest); 4055de4c0e7cSLukas Czerner if (*rest || !size) 4056de4c0e7cSLukas Czerner goto bad_value; 4057de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_INO_LIMIT) 4058de4c0e7cSLukas Czerner return invalfc(fc, 4059de4c0e7cSLukas Czerner "Group quota inode hardlimit too large."); 4060de4c0e7cSLukas Czerner ctx->qlimits.grpquota_ihardlimit = size; 4061de4c0e7cSLukas Czerner break; 4062e04dc423SAl Viro } 4063e04dc423SAl Viro return 0; 4064e04dc423SAl Viro 4065626c3920SAl Viro unsupported_parameter: 4066f35aa2bcSAl Viro return invalfc(fc, "Unsupported parameter '%s'", param->key); 4067626c3920SAl Viro bad_value: 4068f35aa2bcSAl Viro return invalfc(fc, "Bad value for '%s'", param->key); 4069e04dc423SAl Viro } 4070e04dc423SAl Viro 4071f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data) 4072e04dc423SAl Viro { 4073f3235626SDavid Howells char *options = data; 4074f3235626SDavid Howells 407533f37c64SAl Viro if (options) { 407633f37c64SAl Viro int err = security_sb_eat_lsm_opts(options, &fc->security); 407733f37c64SAl Viro if (err) 407833f37c64SAl Viro return err; 407933f37c64SAl Viro } 408033f37c64SAl Viro 4081b00dc3adSHugh Dickins while (options != NULL) { 4082626c3920SAl Viro char *this_char = options; 4083b00dc3adSHugh Dickins for (;;) { 4084b00dc3adSHugh Dickins /* 4085b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 4086b00dc3adSHugh Dickins * mount options form a comma-separated list, 4087b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
4088b00dc3adSHugh Dickins */ 4089b00dc3adSHugh Dickins options = strchr(options, ','); 4090b00dc3adSHugh Dickins if (options == NULL) 4091b00dc3adSHugh Dickins break; 4092b00dc3adSHugh Dickins options++; 4093b00dc3adSHugh Dickins if (!isdigit(*options)) { 4094b00dc3adSHugh Dickins options[-1] = '\0'; 4095b00dc3adSHugh Dickins break; 4096b00dc3adSHugh Dickins } 4097b00dc3adSHugh Dickins } 4098626c3920SAl Viro if (*this_char) { 4099626c3920SAl Viro char *value = strchr(this_char, '='); 4100f3235626SDavid Howells size_t len = 0; 4101626c3920SAl Viro int err; 4102626c3920SAl Viro 4103626c3920SAl Viro if (value) { 4104626c3920SAl Viro *value++ = '\0'; 4105f3235626SDavid Howells len = strlen(value); 41061da177e4SLinus Torvalds } 4107f3235626SDavid Howells err = vfs_parse_fs_string(fc, this_char, value, len); 4108f3235626SDavid Howells if (err < 0) 4109f3235626SDavid Howells return err; 41101da177e4SLinus Torvalds } 4111626c3920SAl Viro } 41121da177e4SLinus Torvalds return 0; 41131da177e4SLinus Torvalds } 41141da177e4SLinus Torvalds 4115f3235626SDavid Howells /* 4116f3235626SDavid Howells * Reconfigure a shmem filesystem. 4117f3235626SDavid Howells */ 4118f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc) 41191da177e4SLinus Torvalds { 4120f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 4121f3235626SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 4122e07c469eSHugh Dickins unsigned long used_isp; 4123bf11b9a8SSebastian Andrzej Siewior struct mempolicy *mpol = NULL; 4124f3235626SDavid Howells const char *err; 41250edd73b3SHugh Dickins 4126bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); 4127e07c469eSHugh Dickins used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; 41280c98c8e1SZhaoLong Wang 4129f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 4130f3235626SDavid Howells if (!sbinfo->max_blocks) { 4131f3235626SDavid Howells err = "Cannot retroactively limit size"; 41320edd73b3SHugh Dickins goto out; 41330b5071ddSAl Viro } 4134f3235626SDavid Howells if (percpu_counter_compare(&sbinfo->used_blocks, 4135f3235626SDavid Howells ctx->blocks) > 0) { 4136f3235626SDavid Howells err = "Too small a size for current use"; 41370b5071ddSAl Viro goto out; 4138f3235626SDavid Howells } 4139f3235626SDavid Howells } 4140f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 4141f3235626SDavid Howells if (!sbinfo->max_inodes) { 4142f3235626SDavid Howells err = "Cannot retroactively limit inodes"; 41430b5071ddSAl Viro goto out; 41440b5071ddSAl Viro } 4145e07c469eSHugh Dickins if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { 4146f3235626SDavid Howells err = "Too few inodes for current use"; 4147f3235626SDavid Howells goto out; 4148f3235626SDavid Howells } 4149f3235626SDavid Howells } 41500edd73b3SHugh Dickins 4151ea3271f7SChris Down if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 4152ea3271f7SChris Down sbinfo->next_ino > UINT_MAX) { 4153ea3271f7SChris Down err = "Current inum too high to switch to 32-bit inums"; 4154ea3271f7SChris Down goto out; 4155ea3271f7SChris Down } 41562c6efe9cSLuis Chamberlain if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { 41572c6efe9cSLuis Chamberlain err = "Cannot disable swap on remount"; 41582c6efe9cSLuis Chamberlain goto out; 41592c6efe9cSLuis Chamberlain } 41602c6efe9cSLuis Chamberlain if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { 41612c6efe9cSLuis Chamberlain err = "Cannot 
enable swap on remount if it was disabled on first mount"; 41622c6efe9cSLuis Chamberlain goto out; 41632c6efe9cSLuis Chamberlain } 4164ea3271f7SChris Down 4165e09764cfSCarlos Maiolino if (ctx->seen & SHMEM_SEEN_QUOTA && 4166e09764cfSCarlos Maiolino !sb_any_quota_loaded(fc->root->d_sb)) { 4167e09764cfSCarlos Maiolino err = "Cannot enable quota on remount"; 4168e09764cfSCarlos Maiolino goto out; 4169e09764cfSCarlos Maiolino } 4170e09764cfSCarlos Maiolino 4171de4c0e7cSLukas Czerner #ifdef CONFIG_TMPFS_QUOTA 4172de4c0e7cSLukas Czerner #define CHANGED_LIMIT(name) \ 4173de4c0e7cSLukas Czerner (ctx->qlimits.name## hardlimit && \ 4174de4c0e7cSLukas Czerner (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) 4175de4c0e7cSLukas Czerner 4176de4c0e7cSLukas Czerner if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) || 4177de4c0e7cSLukas Czerner CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) { 4178de4c0e7cSLukas Czerner err = "Cannot change global quota limit on remount"; 4179de4c0e7cSLukas Czerner goto out; 4180de4c0e7cSLukas Czerner } 4181de4c0e7cSLukas Czerner #endif /* CONFIG_TMPFS_QUOTA */ 4182de4c0e7cSLukas Czerner 4183f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_HUGE) 4184f3235626SDavid Howells sbinfo->huge = ctx->huge; 4185ea3271f7SChris Down if (ctx->seen & SHMEM_SEEN_INUMS) 4186ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums; 4187f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_BLOCKS) 4188f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 4189f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_INODES) { 4190f3235626SDavid Howells sbinfo->max_inodes = ctx->inodes; 4191e07c469eSHugh Dickins sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; 41920b5071ddSAl Viro } 419371fe804bSLee Schermerhorn 41945f00110fSGreg Thelen /* 41955f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 41965f00110fSGreg Thelen */ 4197f3235626SDavid Howells if (ctx->mpol) { 4198bf11b9a8SSebastian Andrzej Siewior mpol = sbinfo->mpol; 4199f3235626SDavid Howells sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 4200f3235626SDavid Howells ctx->mpol = NULL; 42015f00110fSGreg Thelen } 42022c6efe9cSLuis Chamberlain 42032c6efe9cSLuis Chamberlain if (ctx->noswap) 42042c6efe9cSLuis Chamberlain sbinfo->noswap = true; 42052c6efe9cSLuis Chamberlain 4206bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 4207bf11b9a8SSebastian Andrzej Siewior mpol_put(mpol); 4208f3235626SDavid Howells return 0; 42090edd73b3SHugh Dickins out: 4210bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock); 4211f35aa2bcSAl Viro return invalfc(fc, "%s", err); 42121da177e4SLinus Torvalds } 4213680d794bSakpm@linux-foundation.org 421434c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4215680d794bSakpm@linux-foundation.org { 421634c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4217283ebdeeSTu Jinjiang struct mempolicy *mpol; 4218680d794bSakpm@linux-foundation.org 4219680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 4220b91742d8SZhangPeng seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); 4221680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 4222680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 42230825a6f9SJoe Perches if (sbinfo->mode != (0777 | S_ISVTX)) 422409208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 42258751e039SEric W. 
Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 42268751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 42278751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 42288751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 42298751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 42308751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 4231ea3271f7SChris Down 4232ea3271f7SChris Down /* 4233ea3271f7SChris Down * Showing inode{64,32} might be useful even if it's the system default, 4234ea3271f7SChris Down * since then people don't have to resort to checking both here and 4235ea3271f7SChris Down * /proc/config.gz to confirm 64-bit inums were successfully applied 4236ea3271f7SChris Down * (which may not even exist if IKCONFIG_PROC isn't enabled). 4237ea3271f7SChris Down * 4238ea3271f7SChris Down * We hide it when inode64 isn't the default and we are using 32-bit 4239ea3271f7SChris Down * inodes, since that probably just means the feature isn't even under 4240ea3271f7SChris Down * consideration. 4241ea3271f7SChris Down * 4242ea3271f7SChris Down * As such: 4243ea3271f7SChris Down * 4244ea3271f7SChris Down * +-----------------+-----------------+ 4245ea3271f7SChris Down * | TMPFS_INODE64=y | TMPFS_INODE64=n | 4246ea3271f7SChris Down * +------------------+-----------------+-----------------+ 4247ea3271f7SChris Down * | full_inums=true | show | show | 4248ea3271f7SChris Down * | full_inums=false | show | hide | 4249ea3271f7SChris Down * +------------------+-----------------+-----------------+ 4250ea3271f7SChris Down * 4251ea3271f7SChris Down */ 4252ea3271f7SChris Down if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) 4253ea3271f7SChris Down seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); 4254396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE 42555a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 42565a6e75f8SKirill A. Shutemov if (sbinfo->huge) 42575a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 42585a6e75f8SKirill A. 
Shutemov #endif 4259283ebdeeSTu Jinjiang mpol = shmem_get_sbmpol(sbinfo); 4260283ebdeeSTu Jinjiang shmem_show_mpol(seq, mpol); 4261283ebdeeSTu Jinjiang mpol_put(mpol); 42622c6efe9cSLuis Chamberlain if (sbinfo->noswap) 42632c6efe9cSLuis Chamberlain seq_printf(seq, ",noswap"); 4264680d794bSakpm@linux-foundation.org return 0; 4265680d794bSakpm@linux-foundation.org } 42669183df25SDavid Herrmann 4267680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 42681da177e4SLinus Torvalds 42691da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 42701da177e4SLinus Torvalds { 4271602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4272602586a8SHugh Dickins 4273e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA 4274e09764cfSCarlos Maiolino shmem_disable_quotas(sb); 4275e09764cfSCarlos Maiolino #endif 4276e809d5f0SChris Down free_percpu(sbinfo->ino_batch); 4277602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 427849cd0a5cSGreg Thelen mpol_put(sbinfo->mpol); 4279602586a8SHugh Dickins kfree(sbinfo); 42801da177e4SLinus Torvalds sb->s_fs_info = NULL; 42811da177e4SLinus Torvalds } 42821da177e4SLinus Torvalds 4283f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 42841da177e4SLinus Torvalds { 4285f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 42861da177e4SLinus Torvalds struct inode *inode; 42870edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 428871480663SCarlos Maiolino int error = -ENOMEM; 4289680d794bSakpm@linux-foundation.org 4290680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 4291425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 4292680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 4293680d794bSakpm@linux-foundation.org if (!sbinfo) 429471480663SCarlos Maiolino return error; 4295680d794bSakpm@linux-foundation.org 4296680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 42971da177e4SLinus Torvalds 42980edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 42991da177e4SLinus Torvalds /* 43001da177e4SLinus Torvalds * Per default we only allow half of the physical ram per 43011da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 43021da177e4SLinus Torvalds * but the internal instance is left unlimited. 43031da177e4SLinus Torvalds */ 43041751e8a6SLinus Torvalds if (!(sb->s_flags & SB_KERNMOUNT)) { 4305f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 4306f3235626SDavid Howells ctx->blocks = shmem_default_max_blocks(); 4307f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_INODES)) 4308f3235626SDavid Howells ctx->inodes = shmem_default_max_inodes(); 4309ea3271f7SChris Down if (!(ctx->seen & SHMEM_SEEN_INUMS)) 4310ea3271f7SChris Down ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 43112c6efe9cSLuis Chamberlain sbinfo->noswap = ctx->noswap; 4312ca4e0519SAl Viro } else { 43131751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 43141da177e4SLinus Torvalds } 431591828a40SDavid M. 
Grimes sb->s_export_op = &shmem_export_ops; 431636f05cabSJeff Layton sb->s_flags |= SB_NOSEC | SB_I_VERSION; 43170edd73b3SHugh Dickins #else 43181751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 43190edd73b3SHugh Dickins #endif 4320f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 4321e07c469eSHugh Dickins sbinfo->max_inodes = ctx->inodes; 4322e07c469eSHugh Dickins sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; 4323e809d5f0SChris Down if (sb->s_flags & SB_KERNMOUNT) { 4324e809d5f0SChris Down sbinfo->ino_batch = alloc_percpu(ino_t); 4325e809d5f0SChris Down if (!sbinfo->ino_batch) 4326e809d5f0SChris Down goto failed; 4327e809d5f0SChris Down } 4328f3235626SDavid Howells sbinfo->uid = ctx->uid; 4329f3235626SDavid Howells sbinfo->gid = ctx->gid; 4330ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums; 4331f3235626SDavid Howells sbinfo->mode = ctx->mode; 4332f3235626SDavid Howells sbinfo->huge = ctx->huge; 4333f3235626SDavid Howells sbinfo->mpol = ctx->mpol; 4334f3235626SDavid Howells ctx->mpol = NULL; 43351da177e4SLinus Torvalds 4336bf11b9a8SSebastian Andrzej Siewior raw_spin_lock_init(&sbinfo->stat_lock); 4337908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 4338602586a8SHugh Dickins goto failed; 4339779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 4340779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 43411da177e4SLinus Torvalds 4342285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 434309cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 434409cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 43451da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 43461da177e4SLinus Torvalds sb->s_op = &shmem_ops; 4347cfd95a9cSRobin H. Johnson sb->s_time_gran = 1; 4348b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 434939f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 4350b09e0fa4SEric Paris #endif 4351b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 43521751e8a6SLinus Torvalds sb->s_flags |= SB_POSIXACL; 435339f0247dSAndreas Gruenbacher #endif 43542b4db796SAmir Goldstein uuid_gen(&sb->s_uuid); 43550edd73b3SHugh Dickins 4356e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA 4357e09764cfSCarlos Maiolino if (ctx->seen & SHMEM_SEEN_QUOTA) { 4358e09764cfSCarlos Maiolino sb->dq_op = &shmem_quota_operations; 4359e09764cfSCarlos Maiolino sb->s_qcop = &dquot_quotactl_sysfile_ops; 4360e09764cfSCarlos Maiolino sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; 4361e09764cfSCarlos Maiolino 4362de4c0e7cSLukas Czerner /* Copy the default limits from ctx into sbinfo */ 4363de4c0e7cSLukas Czerner memcpy(&sbinfo->qlimits, &ctx->qlimits, 4364de4c0e7cSLukas Czerner sizeof(struct shmem_quota_limits)); 4365de4c0e7cSLukas Czerner 4366e09764cfSCarlos Maiolino if (shmem_enable_quotas(sb, ctx->quota_types)) 4367e09764cfSCarlos Maiolino goto failed; 4368e09764cfSCarlos Maiolino } 4369e09764cfSCarlos Maiolino #endif /* CONFIG_TMPFS_QUOTA */ 4370e09764cfSCarlos Maiolino 43717a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, 43727a80e5b8SGiuseppe Scrivano VM_NORESERVE); 437371480663SCarlos Maiolino if (IS_ERR(inode)) { 437471480663SCarlos Maiolino error = PTR_ERR(inode); 43751da177e4SLinus Torvalds goto failed; 437671480663SCarlos Maiolino } 4377680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 4378680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 4379318ceed0SAl Viro sb->s_root = d_make_root(inode); 4380318ceed0SAl Viro if 
static int shmem_get_tree(struct fs_context *fc)
{
        return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
        struct shmem_options *ctx = fc->fs_private;

        if (ctx) {
                mpol_put(ctx->mpol);
                kfree(ctx);
        }
}

static const struct fs_context_operations shmem_fs_context_ops = {
        .free                   = shmem_free_fc,
        .get_tree               = shmem_get_tree,
#ifdef CONFIG_TMPFS
        .parse_monolithic       = shmem_parse_options,
        .parse_param            = shmem_parse_one,
        .reconfigure            = shmem_reconfigure,
#endif
};
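/*
 * Illustrative sketch (userspace, with assumed option values): the ops above
 * are driven by the new mount API, so a tmpfs instance can also be built up
 * step by step via the fsopen()/fsconfig()/fsmount() syscalls:
 *
 *      int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
 *      fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "512m", 0); // ->parse_param
 *      fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);     // ->get_tree
 *      int mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
 *      move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * A classic mount(2) options string instead arrives through
 * ->parse_monolithic, i.e. shmem_parse_options().
 */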
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *info;
        info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
        if (!info)
                return NULL;
        return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
        if (S_ISREG(inode->i_mode))
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
        if (S_ISDIR(inode->i_mode))
                simple_offset_destroy(shmem_get_offset_ctx(inode));
}

static void shmem_init_inode(void *foo)
{
        struct shmem_inode_info *info = foo;
        inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

/* Keep the page in page cache instead of truncating it */
static int shmem_error_remove_page(struct address_space *mapping,
                                   struct page *page)
{
        return 0;
}

const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_TMPFS
        .write_begin    = shmem_write_begin,
        .write_end      = shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
        .error_remove_page = shmem_error_remove_page,
};
EXPORT_SYMBOL(shmem_aops);

static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
        .open           = shmem_file_open,
        .get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
        .llseek         = shmem_file_llseek,
        .read_iter      = shmem_file_read_iter,
        .write_iter     = shmem_file_write_iter,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .getattr        = shmem_getattr,
        .setattr        = shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
        .listxattr      = shmem_listxattr,
        .set_acl        = simple_set_acl,
        .fileattr_get   = shmem_fileattr_get,
        .fileattr_set   = shmem_fileattr_set,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .getattr        = shmem_getattr,
        .create         = shmem_create,
        .lookup         = simple_lookup,
        .link           = shmem_link,
        .unlink         = shmem_unlink,
        .symlink        = shmem_symlink,
        .mkdir          = shmem_mkdir,
        .rmdir          = shmem_rmdir,
        .mknod          = shmem_mknod,
        .rename         = shmem_rename2,
        .tmpfile        = shmem_tmpfile,
        .get_offset_ctx = shmem_get_offset_ctx,
#endif
#ifdef CONFIG_TMPFS_XATTR
        .listxattr      = shmem_listxattr,
        .fileattr_get   = shmem_fileattr_get,
        .fileattr_set   = shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
        .set_acl        = simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
        .getattr        = shmem_getattr,
#ifdef CONFIG_TMPFS_XATTR
        .listxattr      = shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
        .set_acl        = simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .free_inode     = shmem_free_in_core_inode,
        .destroy_inode  = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs         = shmem_statfs,
        .show_options   = shmem_show_options,
#endif
#ifdef CONFIG_TMPFS_QUOTA
        .get_dquots     = shmem_get_dquots,
#endif
        .evict_inode    = shmem_evict_inode,
        .drop_inode     = generic_delete_inode,
        .put_super      = shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        .nr_cached_objects      = shmem_unused_huge_count,
        .free_cached_objects    = shmem_unused_huge_scan,
#endif
};
static const struct vm_operations_struct shmem_vm_ops = {
        .fault          = shmem_fault,
        .map_pages      = filemap_map_pages,
#ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
#endif
};

static const struct vm_operations_struct shmem_anon_vm_ops = {
        .fault          = shmem_fault,
        .map_pages      = filemap_map_pages,
#ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
        struct shmem_options *ctx;

        ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->mode = 0777 | S_ISVTX;
        ctx->uid = current_fsuid();
        ctx->gid = current_fsgid();

        fc->fs_private = ctx;
        fc->ops = &shmem_fs_context_ops;
        return 0;
}

static struct file_system_type shmem_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "tmpfs",
        .init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
        .parameters     = shmem_fs_parameters,
#endif
        .kill_sb        = kill_litter_super,
#ifdef CONFIG_SHMEM
        .fs_flags       = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
#else
        .fs_flags       = FS_USERNS_MOUNT,
#endif
};

void __init shmem_init(void)
{
        int error;

        shmem_init_inodecache();

#ifdef CONFIG_TMPFS_QUOTA
        error = register_quota_format(&shmem_quota_format);
        if (error < 0) {
                pr_err("Could not register quota format\n");
                goto out3;
        }
#endif

        error = register_filesystem(&shmem_fs_type);
        if (error) {
                pr_err("Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = kern_mount(&shmem_fs_type);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                pr_err("Could not kern_mount tmpfs\n");
                goto out1;
        }

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        else
                shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
#endif
        return;

out1:
        unregister_filesystem(&shmem_fs_type);
out2:
#ifdef CONFIG_TMPFS_QUOTA
        unregister_quota_format(&shmem_quota_format);
out3:
#endif
        shmem_destroy_inodecache();
        shm_mnt = ERR_PTR(error);
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        static const int values[] = {
                SHMEM_HUGE_ALWAYS,
                SHMEM_HUGE_WITHIN_SIZE,
                SHMEM_HUGE_ADVISE,
                SHMEM_HUGE_NEVER,
                SHMEM_HUGE_DENY,
                SHMEM_HUGE_FORCE,
        };
        int len = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(values); i++) {
                len += sysfs_emit_at(buf, len,
                                     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
                                     i ? " " : "",
                                     shmem_format_huge(values[i]));
        }

        len += sysfs_emit_at(buf, len, "\n");

        return len;
}
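/*
 * Sample output of the show method above, assuming the default shmem_huge
 * of SHMEM_HUGE_NEVER (the current setting is bracketed):
 *
 *      always within_size advise [never] deny force
 */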
static ssize_t shmem_enabled_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        char tmp[16];
        int huge;

        if (count + 1 > sizeof(tmp))
                return -EINVAL;
        memcpy(tmp, buf, count);
        tmp[count] = '\0';
        if (count && tmp[count - 1] == '\n')
                tmp[count - 1] = '\0';

        huge = shmem_parse_huge(tmp);
        if (huge == -EINVAL)
                return -EINVAL;
        if (!has_transparent_hugepage() &&
                        huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
                return -EINVAL;

        shmem_huge = huge;
        if (shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        return count;
}

struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
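/*
 * Usage sketch (shell, illustrative): the attribute above is exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, so the tmpfs huge page
 * policy can be inspected and changed at runtime:
 *
 *      cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *      echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * Writes funnel through shmem_enabled_store() and shmem_parse_huge() above.
 */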
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
        .name           = "tmpfs",
        .init_fs_context = ramfs_init_fs_context,
        .parameters     = ramfs_fs_parameters,
        .kill_sb        = ramfs_kill_sb,
        .fs_flags       = FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
        BUG_ON(register_filesystem(&shmem_fs_type) != 0);

        shm_mnt = kern_mount(&shmem_fs_type);
        BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
        return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
                                      unsigned long addr, unsigned long len,
                                      unsigned long pgoff, unsigned long flags)
{
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_anon_vm_ops                       generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)

static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
                                struct super_block *sb, struct inode *dir,
                                umode_t mode, dev_t dev, unsigned long flags)
{
        struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
        return inode ? inode : ERR_PTR(-ENOSPC);
}

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
                                loff_t size, unsigned long flags, unsigned int i_flags)
{
        struct inode *inode;
        struct file *res;

        if (IS_ERR(mnt))
                return ERR_CAST(mnt);

        if (size < 0 || size > MAX_LFS_FILESIZE)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        if (is_idmapped_mnt(mnt))
                return ERR_PTR(-EINVAL);

        inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
                                S_IFREG | S_IRWXUGO, 0, flags);
        if (IS_ERR(inode)) {
                shmem_unacct_size(flags, size);
                return ERR_CAST(inode);
        }
        inode->i_flags |= i_flags;
        inode->i_size = size;
        clear_nlink(inode);     /* It is unlinked */
        res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
        if (!IS_ERR(res))
                res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
                                &shmem_file_operations);
        if (IS_ERR(res))
                iput(inode);
        return res;
}
/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *      kernel internal.  There will be NO LSM permission checks against the
 *      underlying inode.  So users of this interface must do LSM checks at a
 *      higher layer.  The users are the big_key and shm implementations.  LSM
 *      checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
        return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
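/*
 * Usage sketch (illustrative; the name and size are made up): a kernel-side
 * user such as the SysV shm implementation obtains a swappable, unlinked
 * tmpfs file, relying on its own checks instead of LSM inode checks, and
 * releases it with fput():
 *
 *      struct file *filp;
 *
 *      filp = shmem_kernel_file_setup("SYSV01234567", size, VM_NORESERVE);
 *      if (IS_ERR(filp))
 *              return PTR_ERR(filp);
 *      ...
 *      fput(filp);
 */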
/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
                                       loff_t size, unsigned long flags)
{
        return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap()
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        /*
         * Cloning a new file under mmap_lock leads to a lock ordering conflict
         * between XFS directory reading and selinux: since this file is only
         * accessible to the user through its mapping, use S_PRIVATE flag to
         * bypass file security, in the same way as shmem_kernel_file_setup().
         */
        file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_anon_vm_ops;

        return 0;
}
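/*
 * For orientation (illustrative): this is the function that gives a shared
 * anonymous mapping its backing object, so a userspace call such as
 *
 *      p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * ends up here, with the mmap() path calling shmem_zero_setup() on the
 * vma it has prepared; the zero-filled pages really live in an unlinked
 * tmpfs file.
 */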
/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the folio's address_space
 * @index: the folio index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method, which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
        struct inode *inode = mapping->host;
        struct folio *folio;
        int error;

        BUG_ON(!shmem_mapping(mapping));
        error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
                                    gfp, NULL, NULL, NULL);
        if (error)
                return ERR_PTR(error);

        folio_unlock(folio);
        return folio;
#else
        /*
         * The tiny !SHMEM case uses ramfs without swap
         */
        return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);

struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp)
{
        struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
        struct page *page;

        if (IS_ERR(folio))
                return &folio->page;

        page = folio_file_page(folio, index);
        if (PageHWPoison(page)) {
                folio_put(folio);
                return ERR_PTR(-EIO);
        }

        return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
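/*
 * Usage sketch (illustrative, mirroring the i915/ttm pattern noted above):
 * read page i of a shmem object, relaxing the gfp mask so that allocation
 * failure is reported to the caller rather than OOMing the machine:
 *
 *      struct address_space *mapping = file_inode(filp)->i_mapping;
 *      gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *      struct page *page;
 *
 *      page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ...
 *      put_page(page);
 */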