/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

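/*
 * Illustrative arithmetic (assuming the common PAGE_SIZE of 4096): VM_ACCT()
 * rounds a byte count up to whole pages, so VM_ACCT(5000) == 2; and with
 * 512-byte units for i_blocks reporting, BLOCKS_PER_PAGE == 8.
 */
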
/* Pretend that each entry is of this size in a directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};

struct shmem_options {
        unsigned long long blocks;
        unsigned long long inodes;
        struct mempolicy *mpol;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        bool full_inums;
        int huge;
        int seen;
        bool noswap;
        unsigned short quota_types;
        struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        unsigned long nr_pages = totalram_pages();

        return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
                    ULONG_MAX / BOGO_INODE_SIZE);
}
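
/*
 * Worked example (illustrative only): on a 64-bit machine with 8 GiB of RAM
 * and 4 KiB pages, the defaults above allow half of RAM for data (1M blocks
 * == 4 GiB, tmpfs's documented size=50% default) and roughly a million
 * inodes (nr_pages / 2, since the other two min3() terms are larger there).
 */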
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
                              struct folio **foliop, enum sgp_type sgp,
                              gfp_t gfp, struct vm_area_struct *vma,
                              vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static int shmem_inode_acct_block(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int err = -ENOSPC;

        if (shmem_acct_block(info->flags, pages))
                return err;

        might_sleep();  /* when quotas */
        if (sbinfo->max_blocks) {
                if (percpu_counter_compare(&sbinfo->used_blocks,
                                           sbinfo->max_blocks - pages) > 0)
                        goto unacct;

                err = dquot_alloc_block_nodirty(inode, pages);
                if (err)
                        goto unacct;

                percpu_counter_add(&sbinfo->used_blocks, pages);
        } else {
                err = dquot_alloc_block_nodirty(inode, pages);
                if (err)
                        goto unacct;
        }

        return 0;

unacct:
        shmem_unacct_blocks(info->flags, pages);
        return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        might_sleep();  /* when quotas */
        dquot_free_block_nodirty(inode, pages);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
}
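
/*
 * Reading aid for the two functions above: a charge proceeds as VM
 * overcommit accounting (shmem_acct_block), then the per-mount used_blocks
 * limit, then quota; a failure at a later step unwinds the earlier charges,
 * and shmem_inode_unacct_blocks() releases them in the reverse order.
 */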

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
        return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
        return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
                               unsigned short quota_types)
{
        int type, err = 0;

        sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
        for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
                if (!(quota_types & (1 << type)))
                        continue;
                err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
                                          DQUOT_USAGE_ENABLED |
                                          DQUOT_LIMITS_ENABLED);
                if (err)
                        goto out_err;
        }
        return 0;

out_err:
        pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
                type, err);
        for (type--; type >= 0; type--)
                dquot_quota_off(sb, type);
        return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
        int type;

        for (type = 0; type < SHMEM_MAXQUOTAS; type++)
                dquot_quota_off(sb, type);
}

static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
        return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */
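
/*
 * Usage note (documented tmpfs mount options, shown only as an example):
 *	mount -t tmpfs -o usrquota,grpquota tmpfs /mnt
 * sets the quota_types bits that shmem_enable_quotas() consumes above.
 */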

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        ino_t ino;

        if (!(sb->s_flags & SB_KERNMOUNT)) {
                raw_spin_lock(&sbinfo->stat_lock);
                if (sbinfo->max_inodes) {
                        if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
                                raw_spin_unlock(&sbinfo->stat_lock);
                                return -ENOSPC;
                        }
                        sbinfo->free_ispace -= BOGO_INODE_SIZE;
                }
                if (inop) {
                        ino = sbinfo->next_ino++;
                        if (unlikely(is_zero_ino(ino)))
                                ino = sbinfo->next_ino++;
                        if (unlikely(!sbinfo->full_inums &&
                                     ino > UINT_MAX)) {
                                /*
                                 * Emulate get_next_ino uint wraparound for
                                 * compatibility
                                 */
                                if (IS_ENABLED(CONFIG_64BIT))
                                        pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
                                                __func__, MINOR(sb->s_dev));
                                sbinfo->next_ino = 1;
                                ino = sbinfo->next_ino++;
                        }
                        *inop = ino;
                }
                raw_spin_unlock(&sbinfo->stat_lock);
        } else if (inop) {
                /*
                 * __shmem_file_setup, one of our callers, is lock-free: it
                 * doesn't hold stat_lock in shmem_reserve_inode since
                 * max_inodes is always 0, and is called from potentially
                 * unknown contexts. As such, use a per-cpu batched allocator
                 * which doesn't require the per-sb stat_lock unless we are at
                 * the batch boundary.
                 *
                 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
                 * shmem mounts are not exposed to userspace, so we don't need
                 * to worry about things like glibc compatibility.
                 */
                ino_t *next_ino;

                next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
                ino = *next_ino;
                if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
                        raw_spin_lock(&sbinfo->stat_lock);
                        ino = sbinfo->next_ino;
                        sbinfo->next_ino += SHMEM_INO_BATCH;
                        raw_spin_unlock(&sbinfo->stat_lock);
                        if (unlikely(is_zero_ino(ino)))
                                ino++;
                }
                *inop = ino;
                *next_ino = ++ino;
                put_cpu();
        }

        return 0;
}

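/*
 * Illustrative walk-through of the SB_KERNMOUNT batching above: the first
 * allocation on a CPU sees *next_ino % SHMEM_INO_BATCH == 0, takes stat_lock
 * once and claims a batch of 1024 inos; the next 1023 allocations on that
 * CPU are then served lock-free from the per-cpu cursor.
 */
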
static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                raw_spin_lock(&sbinfo->stat_lock);
                sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
                raw_spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        spin_lock(&info->lock);
        info->alloced += alloced;
        info->swapped += swapped;
        freed = info->alloced - info->swapped -
                READ_ONCE(inode->i_mapping->nrpages);
        /*
         * Special case: whereas normally shmem_recalc_inode() is called
         * after i_mapping->nrpages has already been adjusted (up or down),
         * shmem_writepage() has to raise swapped before nrpages is lowered -
         * to stop a racing shmem_recalc_inode() from thinking that a page has
         * been freed. Compensate here, to avoid the need for a followup call.
         */
        if (swapped > 0)
                freed += swapped;
        if (freed > 0)
                info->alloced -= freed;
        spin_unlock(&info->lock);

        /* The quota case may block */
        if (freed > 0)
                shmem_inode_unacct_blocks(inode, freed);
}

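/*
 * Example of the reconciliation above (derived from the comments): when
 * truncation has dropped undirtied hole pages so that nrpages fell behind
 * our back, a later shmem_recalc_inode(inode, 0, 0) computes freed > 0 and
 * returns those blocks to the quota and VM accounting in one step.
 */
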
bool shmem_charge(struct inode *inode, long pages)
{
        struct address_space *mapping = inode->i_mapping;

        if (shmem_inode_acct_block(inode, pages))
                return false;

        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
        xa_lock_irq(&mapping->i_pages);
        mapping->nrpages += pages;
        xa_unlock_irq(&mapping->i_pages);

        shmem_recalc_inode(inode, pages, 0);
        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        /* pages argument is currently unused: keep it to help debugging */
        /* nrpages adjustment done by __filemap_remove_folio() or caller */

        shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        item = xas_load(&xas);
        if (item != expected)
                return -ENOENT;
        xas_store(&xas, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking the page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

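/*
 * Usage reminder (documented interfaces, examples only):
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * selects one of the per-mount values above, while
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * sets the special system-wide overrides.
 */
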
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
                            bool shmem_huge_force, struct mm_struct *mm,
                            unsigned long vm_flags)
{
        loff_t i_size;

        if (!S_ISREG(inode->i_mode))
                return false;
        if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
                return false;
        if (shmem_huge == SHMEM_HUGE_DENY)
                return false;
        if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
                return true;

        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
                return true;
        case SHMEM_HUGE_WITHIN_SIZE:
                index = round_up(index + 1, HPAGE_PMD_NR);
                i_size = round_up(i_size_read(inode), PAGE_SIZE);
                if (i_size >> PAGE_SHIFT >= index)
                        return true;
                fallthrough;
        case SHMEM_HUGE_ADVISE:
                if (mm && (vm_flags & VM_HUGEPAGE))
                        return true;
                fallthrough;
        default:
                return false;
        }
}

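/*
 * Worked example of the within_size case above (assuming 4 KiB pages and
 * 2 MiB PMD huge pages, so HPAGE_PMD_NR == 512): for a fault at index 100,
 * round_up(101, 512) == 512, so a huge page is allowed only if i_size covers
 * at least 512 pages (2 MiB), i.e. the huge page would lie fully within it.
 */
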
bool shmem_is_huge(struct inode *inode, pgoff_t index,
                   bool shmem_huge_force, struct mm_struct *mm,
                   unsigned long vm_flags)
{
        if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
                return false;

        return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct folio *folio;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_move(&info->shrinklist, &to_remove);
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                sbinfo->shrinklist_len--;
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &to_remove) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;
                list_del_init(&info->shrinklist);
                iput(inode);
        }

        list_for_each_safe(pos, next, &list) {
                int ret;
                pgoff_t index;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split)
                        goto move_back;

                index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
                folio = filemap_get_folio(inode->i_mapping, index);
                if (IS_ERR(folio))
                        goto drop;

                /* No huge page at the end of the file: nothing to split */
                if (!folio_test_large(folio)) {
                        folio_put(folio);
                        goto drop;
                }

                /*
                 * If we failed to lock the folio at this time, move the
                 * inode back onto the shrinklist.
                 *
                 * Waiting for the lock may lead to deadlock in the
                 * reclaim path.
                 */
                if (!folio_trylock(folio)) {
                        folio_put(folio);
                        goto move_back;
                }

                ret = split_folio(folio);
                folio_unlock(folio);
                folio_put(folio);

                /* If split failed, move the inode back onto the shrinklist */
                if (ret)
                        goto move_back;

                split++;
drop:
                list_del_init(&info->shrinklist);
                goto put;
move_back:
                /*
                 * Make sure the inode is either on the global list or deleted
                 * from any local list before iput() since it could be deleted
                 * in another thread once we put the inode (then the local list
                 * is corrupted).
                 */
                spin_lock(&sbinfo->shrinklist_lock);
                list_move(&info->shrinklist, &sbinfo->shrinklist);
                sbinfo->shrinklist_len++;
                spin_unlock(&sbinfo->shrinklist_lock);
put:
                iput(inode);
        }

        return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected, gfp_t gfp,
                                   struct mm_struct *charge_mm)
{
        XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
        long nr = folio_nr_pages(folio);
        int error;

        VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
        VM_BUG_ON(expected && folio_test_large(folio));

        folio_ref_add(folio, nr);
        folio->mapping = mapping;
        folio->index = index;

        if (!folio_test_swapcache(folio)) {
                error = mem_cgroup_charge(folio, charge_mm, gfp);
                if (error) {
                        if (folio_test_pmd_mappable(folio)) {
                                count_vm_event(THP_FILE_FALLBACK);
                                count_vm_event(THP_FILE_FALLBACK_CHARGE);
                        }
                        goto error;
                }
        }
        folio_throttle_swaprate(folio, gfp);

        do {
                xas_lock_irq(&xas);
                if (expected != xas_find_conflict(&xas)) {
                        xas_set_err(&xas, -EEXIST);
                        goto unlock;
                }
                if (expected && xas_find_conflict(&xas)) {
                        xas_set_err(&xas, -EEXIST);
                        goto unlock;
                }
                xas_store(&xas, folio);
                if (xas_error(&xas))
                        goto unlock;
                if (folio_test_pmd_mappable(folio)) {
                        count_vm_event(THP_FILE_ALLOC);
                        __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
                }
                mapping->nrpages += nr;
                __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (xas_error(&xas)) {
                error = xas_error(&xas);
                goto error;
        }

        return 0;
error:
        folio->mapping = NULL;
        folio_ref_sub(folio, nr);
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
        struct address_space *mapping = folio->mapping;
        long nr = folio_nr_pages(folio);
        int error;

        xa_lock_irq(&mapping->i_pages);
        error = shmem_replace_entry(mapping, folio->index, folio, radswap);
        folio->mapping = NULL;
        mapping->nrpages -= nr;
        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
        xa_unlock_irq(&mapping->i_pages);
        folio_put(folio);
        BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                       pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        unsigned long swapped = 0;
        unsigned long max = end - 1;

        rcu_read_lock();
        xas_for_each(&xas, page, max) {
                if (xas_retry(&xas, page))
                        continue;
                if (xa_is_value(page))
                        swapped++;
                if (xas.xa_index == max)
                        break;
                if (need_resched()) {
                        xas_pause(&xas);
                        cond_resched_rcu();
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
                                        vma->vm_pgoff + vma_pages(vma));
}

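/*
 * Example of the fast paths above: a vma mapping the whole object from
 * offset 0 just reports info->swapped in bytes; a vma covering only part of
 * a partially-swapped object must walk the xarray over its own range via
 * shmem_partial_swap_usage().
 */
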
/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;

        folio_batch_init(&fbatch);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping) &&
               filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
                check_move_unevictable_folios(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
        struct folio *folio;

        /*
         * At first avoid shmem_get_folio(,,,SGP_READ): that fails
         * beyond i_size, and reports fallocated folios as holes.
         */
        folio = filemap_get_entry(inode->i_mapping, index);
        if (!folio)
                return folio;
        if (!xa_is_value(folio)) {
                folio_lock(folio);
                if (folio->mapping == inode->i_mapping)
                        return folio;
                /* The folio has been swapped out */
                folio_unlock(folio);
                folio_put(folio);
        }
        /*
         * But read a folio back from swap if any of it is within i_size
         * (although in some cases this is just a waste of time).
         */
        folio = NULL;
        shmem_get_folio(inode, index, &folio, SGP_READ);
        return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                             bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        struct folio *folio;
        bool same_folio;
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        if (info->fallocend > start && info->fallocend <= end && !unfalloc)
                info->fallocend = start;

        folio_batch_init(&fbatch);
        index = start;
        while (index < end && find_lock_entries(mapping, &index, end - 1,
                        &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        folio = fbatch.folios[i];

                        if (xa_is_value(folio)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                        indices[i], folio);
                                continue;
                        }

                        if (!unfalloc || !folio_test_uptodate(folio))
                                truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
        }

        /*
         * When undoing a failed fallocate, we want none of the partial folio
         * zeroing and splitting below, but shall want to truncate the whole
         * folio when !uptodate indicates that it was added by this fallocate,
         * even when [lstart, lend] covers only a part of the folio.
         */
        if (unfalloc)
                goto whole_folios;

        same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
        folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
        if (folio) {
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                folio_mark_dirty(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
                        start = folio_next_index(folio);
                        if (same_folio)
                                end = folio->index;
                }
                folio_unlock(folio);
                folio_put(folio);
                folio = NULL;
        }

        if (!same_folio)
                folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
        if (folio) {
                folio_mark_dirty(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend))
                        end = folio->index;
                folio_unlock(folio);
                folio_put(folio);
        }

whole_folios:

        index = start;
        while (index < end) {
                cond_resched();

                if (!find_get_entries(mapping, &index, end - 1, &fbatch,
                                indices)) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        folio = fbatch.folios[i];

                        if (xa_is_value(folio)) {
10841635f6a7SHugh Dickins if (unfalloc)
10851635f6a7SHugh Dickins continue;
10869fb6beeaSVishal Moola (Oracle) if (shmem_free_swap(mapping, indices[i], folio)) {
1087b1a36650SHugh Dickins /* Swap was replaced by page: retry */
10889fb6beeaSVishal Moola (Oracle) index = indices[i];
1089b1a36650SHugh Dickins break;
1090b1a36650SHugh Dickins }
1091b1a36650SHugh Dickins nr_swaps_freed++;
10927a5d0fbbSHugh Dickins continue;
10937a5d0fbbSHugh Dickins }
10947a5d0fbbSHugh Dickins
10950e499ed3SMatthew Wilcox (Oracle) folio_lock(folio);
1096800d8c63SKirill A. Shutemov
10970e499ed3SMatthew Wilcox (Oracle) if (!unfalloc || !folio_test_uptodate(folio)) {
10980e499ed3SMatthew Wilcox (Oracle) if (folio_mapping(folio) != mapping) {
1099b1a36650SHugh Dickins /* Page was replaced by swap: retry */
11000e499ed3SMatthew Wilcox (Oracle) folio_unlock(folio);
11019fb6beeaSVishal Moola (Oracle) index = indices[i];
1102b1a36650SHugh Dickins break;
11037a5d0fbbSHugh Dickins }
11040e499ed3SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_writeback(folio),
11050e499ed3SMatthew Wilcox (Oracle) folio);
11067a4ae7acSDavid Stevens
11077a4ae7acSDavid Stevens if (!folio_test_large(folio)) {
11080e499ed3SMatthew Wilcox (Oracle) truncate_inode_folio(mapping, folio);
11097a4ae7acSDavid Stevens } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
11107a4ae7acSDavid Stevens /*
11117a4ae7acSDavid Stevens * If we split a page, reset the loop so
11127a4ae7acSDavid Stevens * that we pick up the new sub pages.
11137a4ae7acSDavid Stevens * Otherwise the THP was entirely
11147a4ae7acSDavid Stevens * dropped or the target range was
11157a4ae7acSDavid Stevens * zeroed, so just continue the loop as
11167a4ae7acSDavid Stevens * is.
11177a4ae7acSDavid Stevens */
11187a4ae7acSDavid Stevens if (!folio_test_large(folio)) {
11197a4ae7acSDavid Stevens folio_unlock(folio);
11207a4ae7acSDavid Stevens index = start;
11217a4ae7acSDavid Stevens break;
11227a4ae7acSDavid Stevens }
11237a4ae7acSDavid Stevens }
112471725ed1SHugh Dickins }
11250e499ed3SMatthew Wilcox (Oracle) folio_unlock(folio);
1126bda97eabSHugh Dickins }
11270e499ed3SMatthew Wilcox (Oracle) folio_batch_remove_exceptionals(&fbatch);
11280e499ed3SMatthew Wilcox (Oracle) folio_batch_release(&fbatch);
1129bda97eabSHugh Dickins }
113094c1e62dSHugh Dickins
11313c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, -nr_swaps_freed);
11321635f6a7SHugh Dickins }
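/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted, path and sizes arbitrary): punching a hole in a tmpfs file
 * is one common way into shmem_undo_range(), via shmem_fallocate() and
 * shmem_truncate_range(). Partial folios at the edges of the hole are
 * zeroed in place; only whole folios are freed.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 65536);
 *	close(fd);
 */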
11331da177e4SLinus Torvalds
11341635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
11351635f6a7SHugh Dickins {
11361635f6a7SHugh Dickins shmem_undo_range(inode, lstart, lend, false);
113765287334SJeff Layton inode->i_mtime = inode_set_ctime_current(inode);
113836f05cabSJeff Layton inode_inc_iversion(inode);
11391da177e4SLinus Torvalds }
114094c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
11411da177e4SLinus Torvalds
1142b74d24f7SChristian Brauner static int shmem_getattr(struct mnt_idmap *idmap,
1143549c7297SChristian Brauner const struct path *path, struct kstat *stat,
1144a528d35eSDavid Howells u32 request_mask, unsigned int query_flags)
114544a30220SYu Zhao {
1146a528d35eSDavid Howells struct inode *inode = path->dentry->d_inode;
114744a30220SYu Zhao struct shmem_inode_info *info = SHMEM_I(inode);
114844a30220SYu Zhao
11493c1b7528SHugh Dickins if (info->alloced - info->swapped != inode->i_mapping->nrpages)
11503c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0);
11513c1b7528SHugh Dickins
1152e408e695STheodore Ts'o if (info->fsflags & FS_APPEND_FL)
1153e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_APPEND;
1154e408e695STheodore Ts'o if (info->fsflags & FS_IMMUTABLE_FL)
1155e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_IMMUTABLE;
1156e408e695STheodore Ts'o if (info->fsflags & FS_NODUMP_FL)
1157e408e695STheodore Ts'o stat->attributes |= STATX_ATTR_NODUMP;
1158e408e695STheodore Ts'o stat->attributes_mask |= (STATX_ATTR_APPEND |
1159e408e695STheodore Ts'o STATX_ATTR_IMMUTABLE |
1160e408e695STheodore Ts'o STATX_ATTR_NODUMP);
11610d72b928SJeff Layton generic_fillattr(idmap, request_mask, inode, stat);
116289fdcd26SYang Shi
11632cf13384SDavid Stevens if (shmem_is_huge(inode, 0, false, NULL, 0))
116489fdcd26SYang Shi stat->blksize = HPAGE_PMD_SIZE;
116589fdcd26SYang Shi
1166f7cd16a5SXavier Roche if (request_mask & STATX_BTIME) {
1167f7cd16a5SXavier Roche stat->result_mask |= STATX_BTIME;
1168f7cd16a5SXavier Roche stat->btime.tv_sec = info->i_crtime.tv_sec;
1169f7cd16a5SXavier Roche stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1170f7cd16a5SXavier Roche }
1171f7cd16a5SXavier Roche
117244a30220SYu Zhao return 0;
117344a30220SYu Zhao }
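/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted, path arbitrary): the STATX_BTIME handling above is what
 * lets statx(2) report a tmpfs inode's creation time.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	struct statx stx;
 *	if (statx(AT_FDCWD, "/dev/shm/example", 0, STATX_BTIME, &stx) == 0
 *	    && (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */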
117444a30220SYu Zhao
1175c1632a0fSChristian Brauner static int shmem_setattr(struct mnt_idmap *idmap,
1176549c7297SChristian Brauner struct dentry *dentry, struct iattr *attr)
11771da177e4SLinus Torvalds {
117875c3cfa8SDavid Howells struct inode *inode = d_inode(dentry);
117940e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode);
11801da177e4SLinus Torvalds int error;
118136f05cabSJeff Layton bool update_mtime = false;
118236f05cabSJeff Layton bool update_ctime = true;
11831da177e4SLinus Torvalds
11847a80e5b8SGiuseppe Scrivano error = setattr_prepare(idmap, dentry, attr);
1185db78b877SChristoph Hellwig if (error)
1186db78b877SChristoph Hellwig return error;
1187db78b877SChristoph Hellwig
11886fd73538SDaniel Verkamp if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
11896fd73538SDaniel Verkamp if ((inode->i_mode ^ attr->ia_mode) & 0111) {
11906fd73538SDaniel Verkamp return -EPERM;
11916fd73538SDaniel Verkamp }
11926fd73538SDaniel Verkamp }
11936fd73538SDaniel Verkamp
119494c1e62dSHugh Dickins if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
119594c1e62dSHugh Dickins loff_t oldsize = inode->i_size;
119694c1e62dSHugh Dickins loff_t newsize = attr->ia_size;
11973889e6e7Snpiggin@suse.de
11989608703eSJan Kara /* protected by i_rwsem */
119940e041a2SDavid Herrmann if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
120040e041a2SDavid Herrmann (newsize > oldsize && (info->seals & F_SEAL_GROW)))
120140e041a2SDavid Herrmann return -EPERM;
120240e041a2SDavid Herrmann
120394c1e62dSHugh Dickins if (newsize != oldsize) {
120477142517SKonstantin Khlebnikov error = shmem_reacct_size(SHMEM_I(inode)->flags,
120577142517SKonstantin Khlebnikov oldsize, newsize);
120677142517SKonstantin Khlebnikov if (error)
120777142517SKonstantin Khlebnikov return error;
120894c1e62dSHugh Dickins i_size_write(inode, newsize);
120936f05cabSJeff Layton update_mtime = true;
121036f05cabSJeff Layton } else {
121136f05cabSJeff Layton update_ctime = false;
121294c1e62dSHugh Dickins }
1213afa2db2fSJosef Bacik if (newsize <= oldsize) {
121494c1e62dSHugh Dickins loff_t holebegin = round_up(newsize, PAGE_SIZE);
1215d0424c42SHugh Dickins if (oldsize > holebegin)
1216d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping,
1217d0424c42SHugh Dickins holebegin, 0, 1);
1218d0424c42SHugh Dickins if (info->alloced)
1219d0424c42SHugh Dickins shmem_truncate_range(inode,
1220d0424c42SHugh Dickins newsize, (loff_t)-1);
122194c1e62dSHugh Dickins /* unmap again to remove racily COWed private pages */
1222d0424c42SHugh Dickins if (oldsize > holebegin)
1223d0424c42SHugh Dickins unmap_mapping_range(inode->i_mapping,
1224d0424c42SHugh Dickins holebegin, 0, 1);
122594c1e62dSHugh Dickins }
12261da177e4SLinus Torvalds }
12271da177e4SLinus Torvalds
1228e09764cfSCarlos Maiolino if (is_quota_modification(idmap, inode, attr)) {
1229e09764cfSCarlos Maiolino error = dquot_initialize(inode);
1230e09764cfSCarlos Maiolino if (error)
1231e09764cfSCarlos Maiolino return error;
1232e09764cfSCarlos Maiolino }
1233e09764cfSCarlos Maiolino
1234e09764cfSCarlos Maiolino /* Transfer quota accounting */
1235e09764cfSCarlos Maiolino if (i_uid_needs_update(idmap, attr, inode) ||
1236e09764cfSCarlos Maiolino i_gid_needs_update(idmap, attr, inode)) {
1237e09764cfSCarlos Maiolino error = dquot_transfer(idmap, inode, attr);
1238e09764cfSCarlos Maiolino
1239e09764cfSCarlos Maiolino if (error)
1240e09764cfSCarlos Maiolino return error;
1241e09764cfSCarlos Maiolino }
1242e09764cfSCarlos Maiolino
12437a80e5b8SGiuseppe Scrivano setattr_copy(idmap, inode, attr);
1244db78b877SChristoph Hellwig if (attr->ia_valid & ATTR_MODE)
12457a80e5b8SGiuseppe Scrivano error = posix_acl_chmod(idmap, dentry, inode->i_mode);
124636f05cabSJeff Layton if (!error && update_ctime) {
124765287334SJeff Layton inode_set_ctime_current(inode);
124836f05cabSJeff Layton if (update_mtime)
124965287334SJeff Layton inode->i_mtime = inode_get_ctime(inode);
125036f05cabSJeff Layton inode_inc_iversion(inode);
125136f05cabSJeff Layton }
12521da177e4SLinus Torvalds return error;
12531da177e4SLinus Torvalds }
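/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted): the F_SEAL_SHRINK/F_SEAL_GROW checks above are what make
 * ftruncate(2) fail with EPERM on a sealed memfd.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	ftruncate(fd, 8192);	(fails: F_SEAL_GROW)
 *	ftruncate(fd, 0);	(fails: F_SEAL_SHRINK)
 */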
12541da177e4SLinus Torvalds
12551f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
12561da177e4SLinus Torvalds {
12571da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode);
1258779750d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
12592daf18a7SHugh Dickins size_t freed = 0;
12601da177e4SLinus Torvalds
126130e6a51dSHui Su if (shmem_mapping(inode->i_mapping)) {
12621da177e4SLinus Torvalds shmem_unacct_size(info->flags, inode->i_size);
12631da177e4SLinus Torvalds inode->i_size = 0;
1264bc786390SHugh Dickins mapping_set_exiting(inode->i_mapping);
12653889e6e7Snpiggin@suse.de shmem_truncate_range(inode, 0, (loff_t)-1);
1266779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) {
1267779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock);
1268779750d2SKirill A. Shutemov if (!list_empty(&info->shrinklist)) {
1269779750d2SKirill A. Shutemov list_del_init(&info->shrinklist);
1270779750d2SKirill A. Shutemov sbinfo->shrinklist_len--;
1271779750d2SKirill A. Shutemov }
1272779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock);
1273779750d2SKirill A. Shutemov }
1274af53d3e9SHugh Dickins while (!list_empty(&info->swaplist)) {
1275af53d3e9SHugh Dickins /* Wait while shmem_unuse() is scanning this inode... */
1276af53d3e9SHugh Dickins wait_var_event(&info->stop_eviction,
1277af53d3e9SHugh Dickins !atomic_read(&info->stop_eviction));
1278cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex);
1279af53d3e9SHugh Dickins /* ...but beware of the race if we peeked too early */
1280af53d3e9SHugh Dickins if (!atomic_read(&info->stop_eviction))
12811da177e4SLinus Torvalds list_del_init(&info->swaplist);
1282cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex);
12831da177e4SLinus Torvalds }
12843ed47db3SAl Viro }
1285b09e0fa4SEric Paris
12862daf18a7SHugh Dickins simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
12872daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, freed);
12880f3c42f5SHugh Dickins WARN_ON(inode->i_blocks);
1289dbd5768fSJan Kara clear_inode(inode);
1290e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
1291e09764cfSCarlos Maiolino dquot_free_inode(inode);
1292e09764cfSCarlos Maiolino dquot_drop(inode);
1293e09764cfSCarlos Maiolino #endif
12941da177e4SLinus Torvalds }
12951da177e4SLinus Torvalds
1296b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1297da08e9b7SMatthew Wilcox (Oracle) pgoff_t start, struct folio_batch *fbatch,
1298da08e9b7SMatthew Wilcox (Oracle) pgoff_t *indices, unsigned int type)
1299478922e2SMatthew Wilcox {
1300b56a2d8aSVineeth Remanan Pillai XA_STATE(xas, &mapping->i_pages, start);
1301da08e9b7SMatthew Wilcox (Oracle) struct folio *folio;
130287039546SHugh Dickins swp_entry_t entry;
1303478922e2SMatthew Wilcox
1304478922e2SMatthew Wilcox rcu_read_lock();
1305da08e9b7SMatthew Wilcox (Oracle) xas_for_each(&xas, folio, ULONG_MAX) {
1306da08e9b7SMatthew Wilcox (Oracle) if (xas_retry(&xas, folio))
13075b9c98f3SMike Kravetz continue;
1308b56a2d8aSVineeth Remanan Pillai
1309da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio))
1310478922e2SMatthew Wilcox continue;
1311b56a2d8aSVineeth Remanan Pillai
1312da08e9b7SMatthew Wilcox (Oracle) entry = radix_to_swp_entry(folio);
13136cec2b95SMiaohe Lin /*
13146cec2b95SMiaohe Lin * swapin error entries can be found in the mapping. But they're
13156cec2b95SMiaohe Lin * deliberately ignored here as we've done everything we can do.
13166cec2b95SMiaohe Lin */
131787039546SHugh Dickins if (swp_type(entry) != type)
1318b56a2d8aSVineeth Remanan Pillai continue;
1319b56a2d8aSVineeth Remanan Pillai
1320e384200eSHugh Dickins indices[folio_batch_count(fbatch)] = xas.xa_index;
1321da08e9b7SMatthew Wilcox (Oracle) if (!folio_batch_add(fbatch, folio))
1322da08e9b7SMatthew Wilcox (Oracle) break;
1323b56a2d8aSVineeth Remanan Pillai
1324b56a2d8aSVineeth Remanan Pillai if (need_resched()) {
1325e21a2955SMatthew Wilcox xas_pause(&xas);
1326478922e2SMatthew Wilcox cond_resched_rcu();
1327478922e2SMatthew Wilcox }
1328b56a2d8aSVineeth Remanan Pillai }
1329478922e2SMatthew Wilcox rcu_read_unlock();
1330e21a2955SMatthew Wilcox
1331da08e9b7SMatthew Wilcox (Oracle) return xas.xa_index;
1332b56a2d8aSVineeth Remanan Pillai }
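/*
 * Orientation note (not part of this file): swapped-out shmem pages
 * are stored in the page-cache XArray as value entries, which is why
 * the walk above filters on xa_is_value(). The encoding round-trips
 * losslessly:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	void *xval = swp_to_radix_entry(entry);
 *	xa_is_value(xval) is true;
 *	radix_to_swp_entry(xval).val == entry.val
 */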
1333b56a2d8aSVineeth Remanan Pillai
1334b56a2d8aSVineeth Remanan Pillai /*
1335b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count
1336b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure.
1337b56a2d8aSVineeth Remanan Pillai */
1338da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode,
1339da08e9b7SMatthew Wilcox (Oracle) struct folio_batch *fbatch, pgoff_t *indices)
1340b56a2d8aSVineeth Remanan Pillai {
1341b56a2d8aSVineeth Remanan Pillai int i = 0;
1342b56a2d8aSVineeth Remanan Pillai int ret = 0;
1343b56a2d8aSVineeth Remanan Pillai int error = 0;
1344b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping;
1345b56a2d8aSVineeth Remanan Pillai
1346da08e9b7SMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(fbatch); i++) {
1347da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = fbatch->folios[i];
1348b56a2d8aSVineeth Remanan Pillai
1349da08e9b7SMatthew Wilcox (Oracle) if (!xa_is_value(folio))
1350b56a2d8aSVineeth Remanan Pillai continue;
1351da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, indices[i],
1352da08e9b7SMatthew Wilcox (Oracle) &folio, SGP_CACHE,
1353b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping),
1354b56a2d8aSVineeth Remanan Pillai NULL, NULL);
1355b56a2d8aSVineeth Remanan Pillai if (error == 0) {
1356da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio);
1357da08e9b7SMatthew Wilcox (Oracle) folio_put(folio);
1358b56a2d8aSVineeth Remanan Pillai ret++;
1359b56a2d8aSVineeth Remanan Pillai }
1360b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM)
1361b56a2d8aSVineeth Remanan Pillai break;
1362b56a2d8aSVineeth Remanan Pillai error = 0;
1363b56a2d8aSVineeth Remanan Pillai }
1364b56a2d8aSVineeth Remanan Pillai return error ? error : ret;
1365478922e2SMatthew Wilcox }
1366478922e2SMatthew Wilcox
136746f65ec1SHugh Dickins /*
136846f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache.
136946f65ec1SHugh Dickins */
137010a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type)
13711da177e4SLinus Torvalds {
1372b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping;
1373b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0;
1374da08e9b7SMatthew Wilcox (Oracle) struct folio_batch fbatch;
1375b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE];
1376b56a2d8aSVineeth Remanan Pillai int ret = 0;
13771da177e4SLinus Torvalds
1378b56a2d8aSVineeth Remanan Pillai do {
1379da08e9b7SMatthew Wilcox (Oracle) folio_batch_init(&fbatch);
1380da08e9b7SMatthew Wilcox (Oracle) shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1381da08e9b7SMatthew Wilcox (Oracle) if (folio_batch_count(&fbatch) == 0) {
1382b56a2d8aSVineeth Remanan Pillai ret = 0;
1383778dd893SHugh Dickins break;
1384b56a2d8aSVineeth Remanan Pillai }
1385b56a2d8aSVineeth Remanan Pillai
1386da08e9b7SMatthew Wilcox (Oracle) ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1387b56a2d8aSVineeth Remanan Pillai if (ret < 0)
1388b56a2d8aSVineeth Remanan Pillai break;
1389b56a2d8aSVineeth Remanan Pillai
1390da08e9b7SMatthew Wilcox (Oracle) start = indices[folio_batch_count(&fbatch) - 1];
1391b56a2d8aSVineeth Remanan Pillai } while (true);
1392b56a2d8aSVineeth Remanan Pillai
1393b56a2d8aSVineeth Remanan Pillai return ret;
1394b56a2d8aSVineeth Remanan Pillai }
1395b56a2d8aSVineeth Remanan Pillai
1396b56a2d8aSVineeth Remanan Pillai /*
1397b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap
1398b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be
1399b56a2d8aSVineeth Remanan Pillai * unused.
1400b56a2d8aSVineeth Remanan Pillai */
140110a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
1402b56a2d8aSVineeth Remanan Pillai {
1403b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next;
1404b56a2d8aSVineeth Remanan Pillai int error = 0;
1405b56a2d8aSVineeth Remanan Pillai
1406b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist))
1407b56a2d8aSVineeth Remanan Pillai return 0;
1408b56a2d8aSVineeth Remanan Pillai
1409b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex);
1410b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1411b56a2d8aSVineeth Remanan Pillai if (!info->swapped) {
1412b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist);
1413b56a2d8aSVineeth Remanan Pillai continue;
1414b56a2d8aSVineeth Remanan Pillai }
1415af53d3e9SHugh Dickins /*
1416af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap;
1417af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not
1418af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed
1419af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount).
1420af53d3e9SHugh Dickins */
1421af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction);
1422b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex);
1423b56a2d8aSVineeth Remanan Pillai
142410a9c496SChristoph Hellwig error = shmem_unuse_inode(&info->vfs_inode, type);
1425b56a2d8aSVineeth Remanan Pillai cond_resched();
1426b56a2d8aSVineeth Remanan Pillai
1427b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex);
1428b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist);
1429b56a2d8aSVineeth Remanan Pillai if (!info->swapped)
1430b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist);
1431af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction))
1432af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction);
1433b56a2d8aSVineeth Remanan Pillai if (error)
1434b56a2d8aSVineeth Remanan Pillai break;
14351da177e4SLinus Torvalds }
1436cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex);
1437778dd893SHugh Dickins
1438778dd893SHugh Dickins return error;
14391da177e4SLinus Torvalds }
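/*
 * Illustrative note (not part of this file): shmem_unuse() runs from
 * the swapoff(2) path, e.g. after "swapoff /dev/sdb2" (device name an
 * arbitrary example). Every shmem page previously written to that
 * swap device must be read back into the page cache before the device
 * can be released.
 */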
14401da177e4SLinus Torvalds
14411da177e4SLinus Torvalds /*
14421da177e4SLinus Torvalds * Move the page from the page cache to the swap cache.
14431da177e4SLinus Torvalds */
14441da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
14451da177e4SLinus Torvalds {
1446e2e3fdc7SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
14478ccee8c1SLuis Chamberlain struct address_space *mapping = folio->mapping;
14488ccee8c1SLuis Chamberlain struct inode *inode = mapping->host;
14498ccee8c1SLuis Chamberlain struct shmem_inode_info *info = SHMEM_I(inode);
14502c6efe9cSLuis Chamberlain struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
14516922c0c7SHugh Dickins swp_entry_t swap;
14526922c0c7SHugh Dickins pgoff_t index;
14531da177e4SLinus Torvalds
14541e6decf3SHugh Dickins /*
1455cf7992bfSLuis Chamberlain * Our capabilities prevent regular writeback or sync from ever calling
1456cf7992bfSLuis Chamberlain * shmem_writepage; but a stacking filesystem might use ->writepage of
1457cf7992bfSLuis Chamberlain * its underlying filesystem, in which case tmpfs should write out to
1458cf7992bfSLuis Chamberlain * swap only in response to memory pressure, and not for the writeback
1459cf7992bfSLuis Chamberlain * threads or sync.
1460cf7992bfSLuis Chamberlain */
1461cf7992bfSLuis Chamberlain if (WARN_ON_ONCE(!wbc->for_reclaim))
1462cf7992bfSLuis Chamberlain goto redirty;
1463cf7992bfSLuis Chamberlain
14642c6efe9cSLuis Chamberlain if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
14659a976f0cSLuis Chamberlain goto redirty;
14669a976f0cSLuis Chamberlain
14679a976f0cSLuis Chamberlain if (!total_swap_pages)
14689a976f0cSLuis Chamberlain goto redirty;
14699a976f0cSLuis Chamberlain
1470cf7992bfSLuis Chamberlain /*
14711e6decf3SHugh Dickins * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
14721e6decf3SHugh Dickins * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
14731e6decf3SHugh Dickins * and its shmem_writeback() needs them to be split when swapping.
14741e6decf3SHugh Dickins */
1475f530ed0eSMatthew Wilcox (Oracle) if (folio_test_large(folio)) {
14761e6decf3SHugh Dickins /* Ensure the subpages are still dirty */
1477f530ed0eSMatthew Wilcox (Oracle) folio_test_set_dirty(folio);
14781e6decf3SHugh Dickins if (split_huge_page(page) < 0)
14791e6decf3SHugh Dickins goto redirty;
1480f530ed0eSMatthew Wilcox (Oracle) folio = page_folio(page);
1481f530ed0eSMatthew Wilcox (Oracle) folio_clear_dirty(folio);
14821e6decf3SHugh Dickins }
14831e6decf3SHugh Dickins
1484f530ed0eSMatthew Wilcox (Oracle) index = folio->index;
14851635f6a7SHugh Dickins
14861635f6a7SHugh Dickins /*
14871635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
14881635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a
1489f530ed0eSMatthew Wilcox (Oracle) * fallocated folio arriving here is now to initialize it and write it.
14901aac1400SHugh Dickins *
1491f530ed0eSMatthew Wilcox (Oracle) * That's okay for a folio already fallocated earlier, but if we have
14921aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track
1493f530ed0eSMatthew Wilcox (Oracle) * of this folio in case we have to undo it, and (b) it may not be a
14941aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So
1495f530ed0eSMatthew Wilcox (Oracle) * reactivate the folio, and let shmem_fallocate() quit when too many.
14961635f6a7SHugh Dickins */
1497f530ed0eSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
14981aac1400SHugh Dickins if (inode->i_private) {
14991aac1400SHugh Dickins struct shmem_falloc *shmem_falloc;
15001aac1400SHugh Dickins spin_lock(&inode->i_lock);
15011aac1400SHugh Dickins shmem_falloc = inode->i_private;
15021aac1400SHugh Dickins if (shmem_falloc &&
15038e205f77SHugh Dickins !shmem_falloc->waitq &&
15041aac1400SHugh Dickins index >= shmem_falloc->start &&
15051aac1400SHugh Dickins index < shmem_falloc->next)
15061aac1400SHugh Dickins shmem_falloc->nr_unswapped++;
15071aac1400SHugh Dickins else
15081aac1400SHugh Dickins shmem_falloc = NULL;
15091aac1400SHugh Dickins spin_unlock(&inode->i_lock);
15101aac1400SHugh Dickins if (shmem_falloc)
15111aac1400SHugh Dickins goto redirty;
15121aac1400SHugh Dickins }
1513f530ed0eSMatthew Wilcox (Oracle) folio_zero_range(folio, 0, folio_size(folio));
1514f530ed0eSMatthew Wilcox (Oracle) flush_dcache_folio(folio);
1515f530ed0eSMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
15161635f6a7SHugh Dickins }
15171635f6a7SHugh Dickins
1518e2e3fdc7SMatthew Wilcox (Oracle) swap = folio_alloc_swap(folio);
151948f170fbSHugh Dickins if (!swap.val)
152048f170fbSHugh Dickins goto redirty;
1521d9fe526aSHugh Dickins
1522b1dea800SHugh Dickins /*
1523b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes,
1524f530ed0eSMatthew Wilcox (Oracle) * if it's not already there. Do it now before the folio is
15256922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects
1526b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until
15276922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will
15286922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex.
1529b1dea800SHugh Dickins */
1530b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex);
153105bf86b4SHugh Dickins if (list_empty(&info->swaplist))
1532b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist);
1533b1dea800SHugh Dickins
1534a4c366f0SMatthew Wilcox (Oracle) if (add_to_swap_cache(folio, swap,
15353852f676SJoonsoo Kim __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
15363852f676SJoonsoo Kim NULL) == 0) {
15373c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 1);
1538aaa46865SHugh Dickins swap_shmem_alloc(swap);
15394cd400fdSMatthew Wilcox (Oracle) shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
15406922c0c7SHugh Dickins
15416922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex);
1542f530ed0eSMatthew Wilcox (Oracle) BUG_ON(folio_mapped(folio));
1543f530ed0eSMatthew Wilcox (Oracle) swap_writepage(&folio->page, wbc);
15441da177e4SLinus Torvalds return 0;
15451da177e4SLinus Torvalds }
15461da177e4SLinus Torvalds
15476922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex);
15484081f744SMatthew Wilcox (Oracle) put_swap_folio(folio, swap);
15491da177e4SLinus Torvalds redirty:
1550f530ed0eSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
1551d9fe526aSHugh Dickins if (wbc->for_reclaim)
1552f530ed0eSMatthew Wilcox (Oracle) return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1553f530ed0eSMatthew Wilcox (Oracle) folio_unlock(folio);
1554d9fe526aSHugh Dickins return 0;
15551da177e4SLinus Torvalds }
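/*
 * Illustrative note (not part of this file): the sbinfo->noswap test
 * above backs the tmpfs "noswap" mount option, e.g.
 *
 *	mount -t tmpfs -o noswap tmpfs /mnt
 *
 * With noswap set, folios of this mount are never written out to swap;
 * under reclaim they are simply redirtied and kept in memory.
 */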
15561da177e4SLinus Torvalds
155775edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
155871fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1559680d794bSakpm@linux-foundation.org {
1560680d794bSakpm@linux-foundation.org char buffer[64];
1561680d794bSakpm@linux-foundation.org
156271fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT)
1563095f1fc4SLee Schermerhorn return; /* show nothing */
1564095f1fc4SLee Schermerhorn
1565a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol);
1566095f1fc4SLee Schermerhorn
1567095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer);
1568680d794bSakpm@linux-foundation.org }
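/*
 * Illustrative note (not part of this file): shmem_show_mpol() is what
 * surfaces a tmpfs NUMA policy in /proc/mounts, e.g. after
 *
 *	mount -t tmpfs -o mpol=interleave tmpfs /mnt
 *
 * the corresponding mount entry shows ",mpol=interleave".
 */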
156971fe804bSLee Schermerhorn
157071fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
157171fe804bSLee Schermerhorn {
157271fe804bSLee Schermerhorn struct mempolicy *mpol = NULL;
157371fe804bSLee Schermerhorn if (sbinfo->mpol) {
1574bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
157571fe804bSLee Schermerhorn mpol = sbinfo->mpol;
157671fe804bSLee Schermerhorn mpol_get(mpol);
1577bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock);
157871fe804bSLee Schermerhorn }
157971fe804bSLee Schermerhorn return mpol;
158071fe804bSLee Schermerhorn }
158175edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
158275edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
158375edd345SHugh Dickins {
158475edd345SHugh Dickins }
158575edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
158675edd345SHugh Dickins {
158775edd345SHugh Dickins return NULL;
158875edd345SHugh Dickins }
158975edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
159075edd345SHugh Dickins #ifndef CONFIG_NUMA
159175edd345SHugh Dickins #define vm_policy vm_private_data
159275edd345SHugh Dickins #endif
1593680d794bSakpm@linux-foundation.org
1594800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1595800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index)
1596800d8c63SKirill A. Shutemov {
1597800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */
15982c4541e2SKirill A. Shutemov vma_init(vma, NULL);
1599800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */
1600800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino;
1601800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1602800d8c63SKirill A. Shutemov }
1603800d8c63SKirill A. Shutemov
1604800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1605800d8c63SKirill A. Shutemov {
1606800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */
1607800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy);
1608800d8c63SKirill A. Shutemov }
1609800d8c63SKirill A. Shutemov
16105739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
161141ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index)
16121da177e4SLinus Torvalds {
16131da177e4SLinus Torvalds struct vm_area_struct pvma;
161418a2f371SMel Gorman struct page *page;
16158c63ca5bSWill Deacon struct vm_fault vmf = {
16168c63ca5bSWill Deacon .vma = &pvma,
16178c63ca5bSWill Deacon };
16181da177e4SLinus Torvalds
1619800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index);
1620e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf);
1621800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma);
162218a2f371SMel Gorman
16235739a81cSMatthew Wilcox (Oracle) if (!page)
16245739a81cSMatthew Wilcox (Oracle) return NULL;
16255739a81cSMatthew Wilcox (Oracle) return page_folio(page);
1626800d8c63SKirill A. Shutemov }
162718a2f371SMel Gorman
162878cc8cdcSRik van Riel /*
162978cc8cdcSRik van Riel * Make sure huge_gfp is always more limited than limit_gfp.
163078cc8cdcSRik van Riel * Some of the flags set permissions, while others set limitations.
163178cc8cdcSRik van Riel */
163278cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
163378cc8cdcSRik van Riel {
163478cc8cdcSRik van Riel gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
163578cc8cdcSRik van Riel gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1636187df5ddSRik van Riel gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1637187df5ddSRik van Riel gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1638187df5ddSRik van Riel
1639187df5ddSRik van Riel /* Allow allocations only from the originally specified zones. */
1640187df5ddSRik van Riel result |= zoneflags;
164178cc8cdcSRik van Riel
164278cc8cdcSRik van Riel /*
164378cc8cdcSRik van Riel * Minimize the result gfp by taking the union with the deny flags,
164478cc8cdcSRik van Riel * and the intersection of the allow flags.
164578cc8cdcSRik van Riel */
164678cc8cdcSRik van Riel result |= (limit_gfp & denyflags);
164778cc8cdcSRik van Riel result |= (huge_gfp & limit_gfp) & allowflags;
164878cc8cdcSRik van Riel
164978cc8cdcSRik van Riel return result;
165078cc8cdcSRik van Riel }
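/*
 * Worked example, derived from the masks above: if huge_gfp carries
 * __GFP_FS but limit_gfp does not, the "(huge_gfp & limit_gfp) &
 * allowflags" term drops __GFP_FS from the result; conversely, if
 * limit_gfp carries __GFP_NOWARN or __GFP_NORETRY, the "limit_gfp &
 * denyflags" term adds it. Zone bits are always taken from limit_gfp,
 * never from huge_gfp.
 */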
165178cc8cdcSRik van Riel
165272827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1653800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index)
1654800d8c63SKirill A. Shutemov {
1655800d8c63SKirill A. Shutemov struct vm_area_struct pvma;
16567b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping;
16577b8d046fSMatthew Wilcox pgoff_t hindex;
1658dfe98499SMatthew Wilcox (Oracle) struct folio *folio;
1659800d8c63SKirill A. Shutemov
16604620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR);
16617b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
16627b8d046fSMatthew Wilcox XA_PRESENT))
1663800d8c63SKirill A. Shutemov return NULL;
1664800d8c63SKirill A. Shutemov
1665800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex);
1666dfe98499SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1667800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma);
1668dfe98499SMatthew Wilcox (Oracle) if (!folio)
1669dcdf11eeSDavid Rientjes count_vm_event(THP_FILE_FALLBACK);
167072827e5cSMatthew Wilcox (Oracle) return folio;
167118a2f371SMel Gorman }
167218a2f371SMel Gorman
16730c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp,
167418a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index)
167518a2f371SMel Gorman {
167618a2f371SMel Gorman struct vm_area_struct pvma;
16770c023ef5SMatthew Wilcox (Oracle) struct folio *folio;
167818a2f371SMel Gorman
1679800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index);
16800c023ef5SMatthew Wilcox (Oracle) folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1681800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma);
168218a2f371SMel Gorman
16830c023ef5SMatthew Wilcox (Oracle) return folio;
168418a2f371SMel Gorman }
168518a2f371SMel Gorman
1686b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1687800d8c63SKirill A. Shutemov pgoff_t index, bool huge)
1688800d8c63SKirill A. Shutemov {
16890f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode);
169072827e5cSMatthew Wilcox (Oracle) struct folio *folio;
1691800d8c63SKirill A. Shutemov int nr;
1692c7e263abSLukas Czerner int err;
1693800d8c63SKirill A. Shutemov
1694396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1695800d8c63SKirill A. Shutemov huge = false;
1696800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1;
1697800d8c63SKirill A. Shutemov
1698c7e263abSLukas Czerner err = shmem_inode_acct_block(inode, nr);
1699c7e263abSLukas Czerner if (err)
1700800d8c63SKirill A. Shutemov goto failed;
1701800d8c63SKirill A. Shutemov
1702800d8c63SKirill A. Shutemov if (huge)
170372827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_hugefolio(gfp, info, index);
1704800d8c63SKirill A. Shutemov else
170572827e5cSMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, index);
170672827e5cSMatthew Wilcox (Oracle) if (folio) {
170772827e5cSMatthew Wilcox (Oracle) __folio_set_locked(folio);
170872827e5cSMatthew Wilcox (Oracle) __folio_set_swapbacked(folio);
1709b1d0ec3aSMatthew Wilcox (Oracle) return folio;
171075edd345SHugh Dickins }
171118a2f371SMel Gorman
1712800d8c63SKirill A. Shutemov err = -ENOMEM;
17130f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr);
1714800d8c63SKirill A. Shutemov failed:
1715800d8c63SKirill A. Shutemov return ERR_PTR(err);
17161da177e4SLinus Torvalds }
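/*
 * Note (not part of this file): the path above charges nr blocks
 * (HPAGE_PMD_NR for a huge request, else 1) before allocating, and
 * shmem_inode_unacct_blocks() returns them when the allocation fails,
 * so tmpfs size accounting stays exact across failed huge attempts.
 */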
171771fe804bSLee Schermerhorn
17181da177e4SLinus Torvalds /*
1719bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the
1720fc26babbSMatthew Wilcox (Oracle) * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1721bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in
1722bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special
1723bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1724bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache.
1725bde05d1cSHugh Dickins *
1726bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and
1727bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1728bde05d1cSHugh Dickins * but for now it is a simple matter of zone.
1729bde05d1cSHugh Dickins */
1730069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1731bde05d1cSHugh Dickins {
1732069d849cSMatthew Wilcox (Oracle) return folio_zonenum(folio) > gfp_zone(gfp);
1733bde05d1cSHugh Dickins }
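/*
 * Illustrative example, following the gma500 note above: a driver that
 * maps shmem with gfp restricted to GFP_DMA32 must not be handed a
 * folio that swapin placed in a higher zone; in that case
 * folio_zonenum(folio) > gfp_zone(GFP_DMA32) holds here, and
 * shmem_replace_folio() below copies the data to a suitable folio
 * before it enters the filecache.
 */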
1734bde05d1cSHugh Dickins
17350d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1736bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index)
1737bde05d1cSHugh Dickins {
1738d21bba2bSMatthew Wilcox (Oracle) struct folio *old, *new;
1739bde05d1cSHugh Dickins struct address_space *swap_mapping;
1740c1cb20d4SYu Zhao swp_entry_t entry;
1741bde05d1cSHugh Dickins pgoff_t swap_index;
1742bde05d1cSHugh Dickins int error;
1743bde05d1cSHugh Dickins
17440d698e25SMatthew Wilcox (Oracle) old = *foliop;
17453d2c9087SDavid Hildenbrand entry = old->swap;
1746c1cb20d4SYu Zhao swap_index = swp_offset(entry);
1747907ea17eSMatthew Wilcox (Oracle) swap_mapping = swap_address_space(entry);
1748bde05d1cSHugh Dickins
1749bde05d1cSHugh Dickins /*
1750bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't
1751bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints.
1752bde05d1cSHugh Dickins */
1753bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK;
1754907ea17eSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(old), old);
1755907ea17eSMatthew Wilcox (Oracle) new = shmem_alloc_folio(gfp, info, index);
1756907ea17eSMatthew Wilcox (Oracle) if (!new)
1757bde05d1cSHugh Dickins return -ENOMEM;
1758bde05d1cSHugh Dickins
1759907ea17eSMatthew Wilcox (Oracle) folio_get(new);
1760907ea17eSMatthew Wilcox (Oracle) folio_copy(new, old);
1761907ea17eSMatthew Wilcox (Oracle) flush_dcache_folio(new);
1762bde05d1cSHugh Dickins
1763907ea17eSMatthew Wilcox (Oracle) __folio_set_locked(new);
1764907ea17eSMatthew Wilcox (Oracle) __folio_set_swapbacked(new);
1765907ea17eSMatthew Wilcox (Oracle) folio_mark_uptodate(new);
17663d2c9087SDavid Hildenbrand new->swap = entry;
1767907ea17eSMatthew Wilcox (Oracle) folio_set_swapcache(new);
1768bde05d1cSHugh Dickins
1769bde05d1cSHugh Dickins /*
1770bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's
1771bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there.
1772bde05d1cSHugh Dickins */
1773b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages);
1774907ea17eSMatthew Wilcox (Oracle) error = shmem_replace_entry(swap_mapping, swap_index, old, new);
17750142ef6cSHugh Dickins if (!error) {
1776d21bba2bSMatthew Wilcox (Oracle) mem_cgroup_migrate(old, new);
1777907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1778907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1779907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1780907ea17eSMatthew Wilcox (Oracle) __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
17810142ef6cSHugh Dickins }
1782b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages);
1783bde05d1cSHugh Dickins
17840142ef6cSHugh Dickins if (unlikely(error)) {
17850142ef6cSHugh Dickins /*
17860142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check
17870142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock;
17880142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free.
17890142ef6cSHugh Dickins */
1790907ea17eSMatthew Wilcox (Oracle) old = new;
17910142ef6cSHugh Dickins } else {
1792907ea17eSMatthew Wilcox (Oracle) folio_add_lru(new);
17930d698e25SMatthew Wilcox (Oracle) *foliop = new;
17940142ef6cSHugh Dickins }
1795bde05d1cSHugh Dickins
1796907ea17eSMatthew Wilcox (Oracle) folio_clear_swapcache(old);
1797907ea17eSMatthew Wilcox (Oracle) old->private = NULL;
1798bde05d1cSHugh Dickins
1799907ea17eSMatthew Wilcox (Oracle) folio_unlock(old);
1800907ea17eSMatthew Wilcox (Oracle) folio_put_refs(old, 2);
18010142ef6cSHugh Dickins return error;
1802bde05d1cSHugh Dickins }
1803bde05d1cSHugh Dickins
18046cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
18056cec2b95SMiaohe Lin struct folio *folio, swp_entry_t swap)
18066cec2b95SMiaohe Lin {
18076cec2b95SMiaohe Lin struct address_space *mapping = inode->i_mapping;
18086cec2b95SMiaohe Lin swp_entry_t swapin_error;
18096cec2b95SMiaohe Lin void *old;
18106cec2b95SMiaohe Lin
1811af19487fSAxel Rasmussen swapin_error = make_poisoned_swp_entry();
18126cec2b95SMiaohe Lin old = xa_cmpxchg_irq(&mapping->i_pages, index,
18136cec2b95SMiaohe Lin swp_to_radix_entry(swap),
18146cec2b95SMiaohe Lin swp_to_radix_entry(swapin_error), 0);
18156cec2b95SMiaohe Lin if (old != swp_to_radix_entry(swap))
18166cec2b95SMiaohe Lin return;
18176cec2b95SMiaohe Lin
18186cec2b95SMiaohe Lin folio_wait_writeback(folio);
181975fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio);
18206cec2b95SMiaohe Lin /*
18213c1b7528SHugh Dickins * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
18223c1b7528SHugh Dickins * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
18233c1b7528SHugh Dickins * in shmem_evict_inode().
18246cec2b95SMiaohe Lin */
18253c1b7528SHugh Dickins shmem_recalc_inode(inode, -1, -1);
18266cec2b95SMiaohe Lin swap_free(swap);
18276cec2b95SMiaohe Lin }
18286cec2b95SMiaohe Lin
1829bde05d1cSHugh Dickins /*
1830833de10fSMiaohe Lin * Swap in the folio pointed to by *foliop.
1831833de10fSMiaohe Lin * Caller has to make sure that *foliop contains a valid swapped folio.
1832833de10fSMiaohe Lin * Returns 0 and the folio in foliop if success. On failure, returns the
1833833de10fSMiaohe Lin * error code and NULL in *foliop.
18341da177e4SLinus Torvalds */
1835da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1836da08e9b7SMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp,
1837c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma,
18382b740303SSouptick Joarder vm_fault_t *fault_type)
18391da177e4SLinus Torvalds {
18401da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
184123f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode);
184204f94e3fSDan Schatzberg struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1843cbc2bd98SKairui Song struct swap_info_struct *si;
1844da08e9b7SMatthew Wilcox (Oracle) struct folio *folio = NULL;
18451da177e4SLinus Torvalds swp_entry_t swap;
18461da177e4SLinus Torvalds int error;
18471da177e4SLinus Torvalds
1848da08e9b7SMatthew Wilcox (Oracle) VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1849da08e9b7SMatthew Wilcox (Oracle) swap = radix_to_swp_entry(*foliop);
1850da08e9b7SMatthew Wilcox (Oracle) *foliop = NULL;
185154af6042SHugh Dickins
1852af19487fSAxel Rasmussen if (is_poisoned_swp_entry(swap))
18536cec2b95SMiaohe Lin return -EIO;
18546cec2b95SMiaohe Lin
1855cbc2bd98SKairui Song si = get_swap_device(swap);
1856cbc2bd98SKairui Song if (!si) {
1857cbc2bd98SKairui Song if (!shmem_confirm_swap(mapping, index, swap))
1858cbc2bd98SKairui Song return -EEXIST;
1859cbc2bd98SKairui Song else
1860cbc2bd98SKairui Song return -EINVAL;
1861cbc2bd98SKairui Song }
1862cbc2bd98SKairui Song
18631da177e4SLinus Torvalds /* Look it up and read it in.. */
18645739a81cSMatthew Wilcox (Oracle) folio = swap_cache_get_folio(swap, NULL, 0);
18655739a81cSMatthew Wilcox (Oracle) if (!folio) {
18669e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */
18679e18eb29SAndres Lagar-Cavilla if (fault_type) {
186868da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR;
18699e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT);
18702262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT);
18719e18eb29SAndres Lagar-Cavilla }
18729e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */
18735739a81cSMatthew Wilcox (Oracle) folio = shmem_swapin(swap, gfp, info, index);
18745739a81cSMatthew Wilcox (Oracle) if (!folio) {
18751da177e4SLinus Torvalds error = -ENOMEM;
187654af6042SHugh Dickins goto failed;
1877285b2c4fSHugh Dickins }
18781da177e4SLinus Torvalds }
18791da177e4SLinus Torvalds
1880833de10fSMiaohe Lin /* We have to do this with folio locked to prevent races */
1881da08e9b7SMatthew Wilcox (Oracle) folio_lock(folio);
1882da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_swapcache(folio) ||
18833d2c9087SDavid Hildenbrand folio->swap.val != swap.val ||
1884d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) {
1885c5bf121eSVineeth Remanan Pillai error = -EEXIST;
1886d1899228SHugh Dickins goto unlock;
1887bde05d1cSHugh Dickins }
1888da08e9b7SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
18891da177e4SLinus Torvalds error = -EIO;
189054af6042SHugh Dickins goto failed;
189154af6042SHugh Dickins }
1892da08e9b7SMatthew Wilcox (Oracle) folio_wait_writeback(folio);
189354af6042SHugh Dickins
18948a84802eSSteven Price /*
18958a84802eSSteven Price * Some architectures may have to restore extra metadata to the
1896da08e9b7SMatthew Wilcox (Oracle) * folio after reading from swap.
18978a84802eSSteven Price */
1898da08e9b7SMatthew Wilcox (Oracle) arch_swap_restore(swap, folio);
18998a84802eSSteven Price
1900069d849cSMatthew Wilcox (Oracle) if (shmem_should_replace_folio(folio, gfp)) {
19010d698e25SMatthew Wilcox (Oracle) error = shmem_replace_folio(&folio, gfp, info, index);
1902bde05d1cSHugh Dickins if (error)
190354af6042SHugh Dickins goto failed;
19041da177e4SLinus Torvalds }
19051da177e4SLinus Torvalds
1906b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, index,
19073fea5a49SJohannes Weiner swp_to_radix_entry(swap), gfp,
19083fea5a49SJohannes Weiner charge_mm);
190954af6042SHugh Dickins if (error)
191054af6042SHugh Dickins goto failed;
191154af6042SHugh Dickins
19123c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, -1);
191327ab7006SHugh Dickins
191466d2f4d2SHugh Dickins if (sgp == SGP_WRITE)
1915da08e9b7SMatthew Wilcox (Oracle) folio_mark_accessed(folio);
191666d2f4d2SHugh Dickins
191775fa68a5SMatthew Wilcox (Oracle) delete_from_swap_cache(folio);
1918da08e9b7SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
191927ab7006SHugh Dickins swap_free(swap);
1920cbc2bd98SKairui Song put_swap_device(si);
192127ab7006SHugh Dickins
1922da08e9b7SMatthew Wilcox (Oracle) *foliop = folio;
1923c5bf121eSVineeth Remanan Pillai return 0;
1924c5bf121eSVineeth Remanan Pillai failed:
1925c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap))
1926c5bf121eSVineeth Remanan Pillai error = -EEXIST;
19276cec2b95SMiaohe Lin if (error == -EIO)
19286cec2b95SMiaohe Lin shmem_set_folio_swapin_error(inode, index, folio, swap);
1929c5bf121eSVineeth Remanan Pillai unlock:
1930da08e9b7SMatthew Wilcox (Oracle) if (folio) {
1931da08e9b7SMatthew Wilcox (Oracle) folio_unlock(folio);
1932da08e9b7SMatthew Wilcox (Oracle) folio_put(folio);
1933c5bf121eSVineeth Remanan Pillai }
1934cbc2bd98SKairui Song put_swap_device(si);
1935c5bf121eSVineeth Remanan Pillai
1936c5bf121eSVineeth Remanan Pillai return error;
1937c5bf121eSVineeth Remanan Pillai }
1938c5bf121eSVineeth Remanan Pillai
1939c5bf121eSVineeth Remanan Pillai /*
1940fc26babbSMatthew Wilcox (Oracle) * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1941c5bf121eSVineeth Remanan Pillai *
1942c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the
1943c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap
1944c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache.
1945c5bf121eSVineeth Remanan Pillai *
1946c949b097SAxel Rasmussen * vma, vmf, and fault_type are only supplied by shmem_fault:
1947c5bf121eSVineeth Remanan Pillai * otherwise they are NULL.
1948c5bf121eSVineeth Remanan Pillai */
1949fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1950fc26babbSMatthew Wilcox (Oracle) struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1951c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf,
1952c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type)
1953c5bf121eSVineeth Remanan Pillai {
1954c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping;
1955c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode);
1956c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo;
1957c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm;
1958b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio;
19596fe7d712SLukas Bulwahn pgoff_t hindex;
1960164cc4feSRik van Riel gfp_t huge_gfp;
1961c5bf121eSVineeth Remanan Pillai int error;
1962c5bf121eSVineeth Remanan Pillai int once = 0;
1963c5bf121eSVineeth Remanan Pillai int alloced = 0;
1964c5bf121eSVineeth Remanan Pillai
1965c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1966c5bf121eSVineeth Remanan Pillai return -EFBIG;
1967c5bf121eSVineeth Remanan Pillai repeat:
1968c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE &&
1969c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1970c5bf121eSVineeth Remanan Pillai return -EINVAL;
1971c5bf121eSVineeth Remanan Pillai }
1972c5bf121eSVineeth Remanan Pillai
1973c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb);
197404f94e3fSDan Schatzberg charge_mm = vma ? vma->vm_mm : NULL;
1975c5bf121eSVineeth Remanan Pillai
1976aaeb94ebSChristoph Hellwig folio = filemap_get_entry(mapping, index);
1977b1d0ec3aSMatthew Wilcox (Oracle) if (folio && vma && userfaultfd_minor(vma)) {
1978aaeb94ebSChristoph Hellwig if (!xa_is_value(folio))
1979b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio);
1980c949b097SAxel Rasmussen *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1981c949b097SAxel Rasmussen return 0;
1982c949b097SAxel Rasmussen }
1983c949b097SAxel Rasmussen
1984b1d0ec3aSMatthew Wilcox (Oracle) if (xa_is_value(folio)) {
1985da08e9b7SMatthew Wilcox (Oracle) error = shmem_swapin_folio(inode, index, &folio,
1986c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type);
1987c5bf121eSVineeth Remanan Pillai if (error == -EEXIST)
1988c5bf121eSVineeth Remanan Pillai goto repeat;
1989c5bf121eSVineeth Remanan Pillai
1990fc26babbSMatthew Wilcox (Oracle) *foliop = folio;
1991c5bf121eSVineeth Remanan Pillai return error;
1992c5bf121eSVineeth Remanan Pillai }
1993c5bf121eSVineeth Remanan Pillai
1994b1d0ec3aSMatthew Wilcox (Oracle) if (folio) {
1995aaeb94ebSChristoph Hellwig folio_lock(folio);
1996aaeb94ebSChristoph Hellwig
1997aaeb94ebSChristoph Hellwig /* Has the folio been truncated or swapped out? */
1998aaeb94ebSChristoph Hellwig if (unlikely(folio->mapping != mapping)) {
1999aaeb94ebSChristoph Hellwig folio_unlock(folio);
2000aaeb94ebSChristoph Hellwig folio_put(folio);
2001aaeb94ebSChristoph Hellwig goto repeat;
2002aaeb94ebSChristoph Hellwig }
2003acdd9f8eSHugh Dickins if (sgp == SGP_WRITE)
2004b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_accessed(folio);
2005b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio))
2006acdd9f8eSHugh Dickins goto out;
2007fc26babbSMatthew Wilcox (Oracle) /* fallocated folio */
2008c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ)
2009c5bf121eSVineeth Remanan Pillai goto clear;
2010b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio);
2011b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio);
2012c5bf121eSVineeth Remanan Pillai }
2013c5bf121eSVineeth Remanan Pillai
2014c5bf121eSVineeth Remanan Pillai /*
2015fc26babbSMatthew Wilcox (Oracle) * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2016fc26babbSMatthew Wilcox (Oracle) * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2017acdd9f8eSHugh Dickins */
2018fc26babbSMatthew Wilcox (Oracle) *foliop = NULL;
2019acdd9f8eSHugh Dickins if (sgp == SGP_READ)
2020acdd9f8eSHugh Dickins return 0;
2021acdd9f8eSHugh Dickins if (sgp == SGP_NOALLOC)
2022acdd9f8eSHugh Dickins return -ENOENT;
2023acdd9f8eSHugh Dickins
2024acdd9f8eSHugh Dickins /*
2025acdd9f8eSHugh Dickins * Fast cache lookup and swap lookup did not find it: allocate.
2026c5bf121eSVineeth Remanan Pillai */
2027c5bf121eSVineeth Remanan Pillai
2028cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) {
2029cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2030cfda0526SMike Rapoport return 0;
2031cfda0526SMike Rapoport }
2032cfda0526SMike Rapoport
20332cf13384SDavid Stevens if (!shmem_is_huge(inode, index, false,
20342cf13384SDavid Stevens vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0))
2035800d8c63SKirill A. Shutemov goto alloc_nohuge;
203627d80fa2SKees Cook
2037164cc4feSRik van Riel huge_gfp = vma_thp_gfp_mask(vma);
203878cc8cdcSRik van Riel huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2039b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
2040b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) {
2041c5bf121eSVineeth Remanan Pillai alloc_nohuge:
2042b1d0ec3aSMatthew Wilcox (Oracle) folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
204354af6042SHugh Dickins }
2044b1d0ec3aSMatthew Wilcox (Oracle) if (IS_ERR(folio)) {
2045779750d2SKirill A. Shutemov int retry = 5;
2046c5bf121eSVineeth Remanan Pillai
2047b1d0ec3aSMatthew Wilcox (Oracle) error = PTR_ERR(folio);
2048b1d0ec3aSMatthew Wilcox (Oracle) folio = NULL;
2049779750d2SKirill A. Shutemov if (error != -ENOSPC)
2050c5bf121eSVineeth Remanan Pillai goto unlock;
2051779750d2SKirill A. Shutemov /*
2052fc26babbSMatthew Wilcox (Oracle) * Try to reclaim some space by splitting a large folio
2053779750d2SKirill A. Shutemov * beyond i_size on the filesystem.
2054779750d2SKirill A. Shutemov */
2055779750d2SKirill A. Shutemov while (retry--) {
2056779750d2SKirill A. Shutemov int ret;
2057c5bf121eSVineeth Remanan Pillai
2058779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
2059779750d2SKirill A. Shutemov if (ret == SHRINK_STOP)
2060779750d2SKirill A. Shutemov break;
2061779750d2SKirill A. Shutemov if (ret)
2062779750d2SKirill A. Shutemov goto alloc_nohuge;
2063779750d2SKirill A. Shutemov }
2064c5bf121eSVineeth Remanan Pillai goto unlock;
2065800d8c63SKirill A. Shutemov }
2066800d8c63SKirill A. Shutemov
2067b1d0ec3aSMatthew Wilcox (Oracle) hindex = round_down(index, folio_nr_pages(folio));
2068800d8c63SKirill A. Shutemov
206966d2f4d2SHugh Dickins if (sgp == SGP_WRITE)
2070b1d0ec3aSMatthew Wilcox (Oracle) __folio_set_referenced(folio);
207166d2f4d2SHugh Dickins
2072b7dd44a1SMatthew Wilcox (Oracle) error = shmem_add_to_page_cache(folio, mapping, hindex,
20733fea5a49SJohannes Weiner NULL, gfp & GFP_RECLAIM_MASK,
20743fea5a49SJohannes Weiner charge_mm);
20753fea5a49SJohannes Weiner if (error)
2076800d8c63SKirill A. Shutemov goto unacct;
207754af6042SHugh Dickins
20783c1b7528SHugh Dickins folio_add_lru(folio);
20793c1b7528SHugh Dickins shmem_recalc_inode(inode, folio_nr_pages(folio), 0);
20801635f6a7SHugh Dickins alloced = true;
208154af6042SHugh Dickins
2082b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_pmd_mappable(folio) &&
2083779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2084fc26babbSMatthew Wilcox (Oracle) folio_next_index(folio) - 1) {
2085779750d2SKirill A. Shutemov /*
2086fc26babbSMatthew Wilcox (Oracle) * Part of the large folio is beyond i_size: subject
2087779750d2SKirill A. Shutemov * to shrink under memory pressure.
2088779750d2SKirill A. Shutemov */
2089779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock);
2090d041353dSCong Wang /*
2091d041353dSCong Wang * list_empty_careful() to defend against unlocked access to
2092d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink()
2093d041353dSCong Wang */
2094d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) {
2095779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist,
2096779750d2SKirill A. Shutemov &sbinfo->shrinklist);
2097779750d2SKirill A. Shutemov sbinfo->shrinklist_len++;
2098779750d2SKirill A. Shutemov }
2099779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock);
2100779750d2SKirill A. Shutemov }
2101779750d2SKirill A. Shutemov
2102ec9516fbSHugh Dickins /*
2103fc26babbSMatthew Wilcox (Oracle) * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
21041635f6a7SHugh Dickins */
21051635f6a7SHugh Dickins if (sgp == SGP_FALLOC)
21061635f6a7SHugh Dickins sgp = SGP_WRITE;
21071635f6a7SHugh Dickins clear:
21081635f6a7SHugh Dickins /*
2109fc26babbSMatthew Wilcox (Oracle) * Let SGP_WRITE caller clear ends if write does not fill folio;
2110fc26babbSMatthew Wilcox (Oracle) * but SGP_FALLOC on a folio fallocated earlier must initialize
21111635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee.
2112ec9516fbSHugh Dickins */
2113b1d0ec3aSMatthew Wilcox (Oracle) if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2114b1d0ec3aSMatthew Wilcox (Oracle) long i, n = folio_nr_pages(folio);
2115800d8c63SKirill A. Shutemov
2116b1d0ec3aSMatthew Wilcox (Oracle) for (i = 0; i < n; i++)
2117b1d0ec3aSMatthew Wilcox (Oracle) clear_highpage(folio_page(folio, i));
2118b1d0ec3aSMatthew Wilcox (Oracle) flush_dcache_folio(folio);
2119b1d0ec3aSMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
2120ec9516fbSHugh Dickins }
2121bde05d1cSHugh Dickins
212254af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */
212375edd345SHugh Dickins if (sgp <= SGP_CACHE &&
212409cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2125267a4c76SHugh Dickins if (alloced) {
2126b1d0ec3aSMatthew Wilcox (Oracle) folio_clear_dirty(folio);
2127b1d0ec3aSMatthew Wilcox (Oracle) filemap_remove_folio(folio);
21283c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0);
2129267a4c76SHugh Dickins }
213054af6042SHugh Dickins error = -EINVAL;
2131267a4c76SHugh Dickins goto unlock;
2132ff36b801SShaohua Li }
213363ec1973SMatthew Wilcox (Oracle) out:
2134fc26babbSMatthew Wilcox (Oracle) *foliop = folio;
213554af6042SHugh Dickins return 0;
2136d00806b1SNick Piggin
2137d0217ac0SNick Piggin /*
213854af6042SHugh Dickins * Error recovery.
21391da177e4SLinus Torvalds */
214054af6042SHugh Dickins unacct:
2141b1d0ec3aSMatthew Wilcox (Oracle) shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2142800d8c63SKirill A. Shutemov
2143b1d0ec3aSMatthew Wilcox (Oracle) if (folio_test_large(folio)) {
2144b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio);
2145b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio);
2146800d8c63SKirill A. Shutemov goto alloc_nohuge;
2147800d8c63SKirill A. Shutemov }
2148d1899228SHugh Dickins unlock:
2149b1d0ec3aSMatthew Wilcox (Oracle) if (folio) {
2150b1d0ec3aSMatthew Wilcox (Oracle) folio_unlock(folio);
2151b1d0ec3aSMatthew Wilcox (Oracle) folio_put(folio);
215254af6042SHugh Dickins }
215354af6042SHugh Dickins if (error == -ENOSPC && !once++) {
21543c1b7528SHugh Dickins shmem_recalc_inode(inode, 0, 0);
21551da177e4SLinus Torvalds goto repeat;
2156d8dc74f2SAdrian Bunk }
21577f4446eeSMatthew Wilcox if (error == -EEXIST)
215854af6042SHugh Dickins goto repeat;
215954af6042SHugh Dickins return error;
21601da177e4SLinus Torvalds }
21611da177e4SLinus Torvalds
21624e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
21634e1fc793SMatthew Wilcox (Oracle) enum sgp_type sgp)
21644e1fc793SMatthew Wilcox (Oracle) {
21654e1fc793SMatthew Wilcox (Oracle) return shmem_get_folio_gfp(inode, index, foliop, sgp,
21664e1fc793SMatthew Wilcox (Oracle) mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
21674e1fc793SMatthew Wilcox (Oracle) }
21684e1fc793SMatthew Wilcox (Oracle)
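/*
 * Editor's note - an illustrative caller sketch, not part of the kernel
 * source; the function name below is hypothetical.  On success with a
 * non-NULL folio, shmem_get_folio() returns the folio locked with a
 * reference held, so the caller must unlock and release it; with SGP_READ
 * a hole yields a NULL folio and 0, and the caller substitutes zeroes.
 *
 *	static int example_peek(struct inode *inode, pgoff_t index)
 *	{
 *		struct folio *folio;
 *		int err;
 *
 *		err = shmem_get_folio(inode, index, &folio, SGP_READ);
 *		if (err)
 *			return err;
 *		if (!folio)
 *			return 0;	// hole: treat as zeroes
 *		folio_unlock(folio);
 *		// ... read folio contents ...
 *		folio_put(folio);
 *		return 0;
 *	}
 */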
216910d20bd2SLinus Torvalds /*
217010d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue
217110d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the
217210d20bd2SLinus Torvalds * target.
217310d20bd2SLinus Torvalds */
2174ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
217510d20bd2SLinus Torvalds {
217610d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key);
21772055da97SIngo Molnar list_del_init(&wait->entry);
217810d20bd2SLinus Torvalds return ret;
217910d20bd2SLinus Torvalds }
218010d20bd2SLinus Torvalds
218120acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
21821da177e4SLinus Torvalds {
218311bac800SDave Jiang struct vm_area_struct *vma = vmf->vma;
2184496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
21859e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
218668a54100SMatthew Wilcox (Oracle) struct folio *folio = NULL;
218720acce67SSouptick Joarder int err;
218820acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED;
21891da177e4SLinus Torvalds
2190f00cdc6dSHugh Dickins /*
2191f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can
2192f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn
21939608703eSJan Kara * locks writers out with its hold on i_rwsem. So refrain from
21948e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although
21958e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to
21968e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call,
21978e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added.
21988e205f77SHugh Dickins *
21998e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the
22008e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch:
22018e205f77SHugh Dickins * we just need to make racing faults a rare case.
22028e205f77SHugh Dickins *
22038e205f77SHugh Dickins * The implementation below would be much simpler if we just used a
22049608703eSJan Kara * standard mutex or completion: but we cannot take i_rwsem in fault,
22058e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad.
2206f00cdc6dSHugh Dickins */
2207f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) {
2208f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc;
2209f00cdc6dSHugh Dickins
2210f00cdc6dSHugh Dickins spin_lock(&inode->i_lock);
2211f00cdc6dSHugh Dickins shmem_falloc = inode->i_private;
22128e205f77SHugh Dickins if (shmem_falloc &&
22138e205f77SHugh Dickins shmem_falloc->waitq &&
22148e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start &&
22158e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) {
22168897c1b1SKirill A. Shutemov struct file *fpin;
22178e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq;
221810d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
22198e205f77SHugh Dickins
22208e205f77SHugh Dickins ret = VM_FAULT_NOPAGE;
22218897c1b1SKirill A. Shutemov fpin = maybe_unlock_mmap_for_io(vmf, NULL);
22228897c1b1SKirill A. Shutemov if (fpin)
22238e205f77SHugh Dickins ret = VM_FAULT_RETRY;
22248e205f77SHugh Dickins
22258e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq;
22268e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
22278e205f77SHugh Dickins TASK_UNINTERRUPTIBLE);
22288e205f77SHugh Dickins spin_unlock(&inode->i_lock);
22298e205f77SHugh Dickins schedule();
22308e205f77SHugh Dickins
22318e205f77SHugh Dickins /*
22328e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate()
22338e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq
22348e205f77SHugh Dickins * is usually invalid by the time we reach here, but
22358e205f77SHugh Dickins * finish_wait() does not dereference it in that case;
22368e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all().
22378e205f77SHugh Dickins */
22388e205f77SHugh Dickins spin_lock(&inode->i_lock);
22398e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
22408e205f77SHugh Dickins spin_unlock(&inode->i_lock);
22418897c1b1SKirill A. Shutemov
22428897c1b1SKirill A. Shutemov if (fpin)
22438897c1b1SKirill A. Shutemov fput(fpin);
22448e205f77SHugh Dickins return ret;
2245f00cdc6dSHugh Dickins }
22468e205f77SHugh Dickins spin_unlock(&inode->i_lock);
2247f00cdc6dSHugh Dickins }
2248f00cdc6dSHugh Dickins
224968a54100SMatthew Wilcox (Oracle) err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2250cfda0526SMike Rapoport gfp, vma, vmf, &ret);
225120acce67SSouptick Joarder if (err)
225220acce67SSouptick Joarder return vmf_error(err);
225368a54100SMatthew Wilcox (Oracle) if (folio)
225468a54100SMatthew Wilcox (Oracle) vmf->page = folio_file_page(folio, vmf->pgoff);
225568da9f05SHugh Dickins return ret;
22561da177e4SLinus Torvalds }
22571da177e4SLinus Torvalds
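/*
 * Editor's note, illustrative only: shmem_fault() is reached through the
 * vm_operations_struct that shmem_mmap() below installs on the vma.  The
 * real shmem_vm_ops is defined later in this file, but its shape is
 * roughly:
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault		= shmem_fault,
 *		.map_pages	= filemap_map_pages,
 *	#ifdef CONFIG_NUMA
 *		.set_policy	= shmem_set_policy,
 *		.get_policy	= shmem_get_policy,
 *	#endif
 *	};
 */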
2258c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2259c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len,
2260c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags)
2261c01d5b30SHugh Dickins {
2262c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *,
2263c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long);
2264c01d5b30SHugh Dickins unsigned long addr;
2265c01d5b30SHugh Dickins unsigned long offset;
2266c01d5b30SHugh Dickins unsigned long inflated_len;
2267c01d5b30SHugh Dickins unsigned long inflated_addr;
2268c01d5b30SHugh Dickins unsigned long inflated_offset;
2269c01d5b30SHugh Dickins
2270c01d5b30SHugh Dickins if (len > TASK_SIZE)
2271c01d5b30SHugh Dickins return -ENOMEM;
2272c01d5b30SHugh Dickins
2273c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area;
2274c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags);
2275c01d5b30SHugh Dickins
2276396bcc52SMatthew Wilcox (Oracle) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2277c01d5b30SHugh Dickins return addr;
2278c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr))
2279c01d5b30SHugh Dickins return addr;
2280c01d5b30SHugh Dickins if (addr & ~PAGE_MASK)
2281c01d5b30SHugh Dickins return addr;
2282c01d5b30SHugh Dickins if (addr > TASK_SIZE - len)
2283c01d5b30SHugh Dickins return addr;
2284c01d5b30SHugh Dickins
2285c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY)
2286c01d5b30SHugh Dickins return addr;
2287c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE)
2288c01d5b30SHugh Dickins return addr;
2289c01d5b30SHugh Dickins if (flags & MAP_FIXED)
2290c01d5b30SHugh Dickins return addr;
2291c01d5b30SHugh Dickins /*
2292c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely;
2293c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed.
229499158997SKirill A. Shutemov * But if caller specified an address hint and we allocated area there
229599158997SKirill A. Shutemov * successfully, respect that as before.
2296c01d5b30SHugh Dickins */
229799158997SKirill A. Shutemov if (uaddr == addr)
2298c01d5b30SHugh Dickins return addr;
2299c01d5b30SHugh Dickins
2300c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) {
2301c01d5b30SHugh Dickins struct super_block *sb;
2302c01d5b30SHugh Dickins
2303c01d5b30SHugh Dickins if (file) {
2304c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations);
2305c01d5b30SHugh Dickins sb = file_inode(file)->i_sb;
2306c01d5b30SHugh Dickins } else {
2307c01d5b30SHugh Dickins /*
2308c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c
2309c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object.
2310c01d5b30SHugh Dickins */
2311c01d5b30SHugh Dickins if (IS_ERR(shm_mnt))
2312c01d5b30SHugh Dickins return addr;
2313c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb;
2314c01d5b30SHugh Dickins }
23153089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2316c01d5b30SHugh Dickins return addr;
2317c01d5b30SHugh Dickins }
2318c01d5b30SHugh Dickins
2319c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2320c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2321c01d5b30SHugh Dickins return addr;
2322c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2323c01d5b30SHugh Dickins return addr;
2324c01d5b30SHugh Dickins
2325c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2326c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE)
2327c01d5b30SHugh Dickins return addr;
2328c01d5b30SHugh Dickins if (inflated_len < len)
2329c01d5b30SHugh Dickins return addr;
2330c01d5b30SHugh Dickins
233199158997SKirill A. Shutemov inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2332c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr))
2333c01d5b30SHugh Dickins return addr;
2334c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK)
2335c01d5b30SHugh Dickins return addr;
2336c01d5b30SHugh Dickins
2337c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2338c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset;
2339c01d5b30SHugh Dickins if (inflated_offset > offset)
2340c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE;
2341c01d5b30SHugh Dickins
2342c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len)
2343c01d5b30SHugh Dickins return addr;
2344c01d5b30SHugh Dickins return inflated_addr;
2345c01d5b30SHugh Dickins }
2346c01d5b30SHugh Dickins
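/*
 * Editor's note - a worked example of the inflation arithmetic above
 * (illustrative; assumes 4kB pages and a 2MB HPAGE_PMD_SIZE): for
 * pgoff = 0x210, offset = (0x210 << 12) & 0x1fffff = 0x10000.  A 4MB
 * request is inflated to 4MB + 2MB - 4kB, and inflated_addr is then
 * adjusted by (offset - inflated_offset) so that
 * (inflated_addr & (HPAGE_PMD_SIZE-1)) == offset, letting the bulk of
 * the mapping stay PMD-aligned with the file and be mapped hugely.
 */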
23471da177e4SLinus Torvalds #ifdef CONFIG_NUMA
234841ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
23491da177e4SLinus Torvalds {
2350496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
235141ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
23521da177e4SLinus Torvalds }
23531da177e4SLinus Torvalds
2354d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2355d8dc74f2SAdrian Bunk unsigned long addr)
23561da177e4SLinus Torvalds {
2357496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
235841ffe5d5SHugh Dickins pgoff_t index;
23591da177e4SLinus Torvalds
236041ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
236141ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
23621da177e4SLinus Torvalds }
23631da177e4SLinus Torvalds #endif
23641da177e4SLinus Torvalds
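/*
 * Editor's note, illustrative only: the lookup above converts a faulting
 * address back into a file index, so the policy follows the file rather
 * than the mapping.  E.g. with vm_pgoff = 16 and addr = vm_start + 0x3000
 * on 4kB pages, index = (0x3000 >> PAGE_SHIFT) + 16 = 19, and the shared
 * policy stored for page 19 applies wherever the file happens to be mapped.
 */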
2365d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
23661da177e4SLinus Torvalds {
2367496ad9aaSAl Viro struct inode *inode = file_inode(file);
23681da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode);
23691da177e4SLinus Torvalds int retval = -ENOMEM;
23701da177e4SLinus Torvalds
2371ea0dfeb4SHugh Dickins /*
2372ea0dfeb4SHugh Dickins * What serializes the accesses to info->flags?
2373ea0dfeb4SHugh Dickins * ipc_lock_object() when called from shmctl_do_lock(),
2374ea0dfeb4SHugh Dickins * no serialization needed when called from shm_destroy().
2375ea0dfeb4SHugh Dickins */
23761da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) {
2377d7c9e99aSAlexey Gladkov if (!user_shm_lock(inode->i_size, ucounts))
23781da177e4SLinus Torvalds goto out_nomem;
23791da177e4SLinus Torvalds info->flags |= VM_LOCKED;
238089e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping);
23811da177e4SLinus Torvalds }
2382d7c9e99aSAlexey Gladkov if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2383d7c9e99aSAlexey Gladkov user_shm_unlock(inode->i_size, ucounts);
23841da177e4SLinus Torvalds info->flags &= ~VM_LOCKED;
238589e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping);
23861da177e4SLinus Torvalds }
23871da177e4SLinus Torvalds retval = 0;
238889e004eaSLee Schermerhorn
23891da177e4SLinus Torvalds out_nomem:
23901da177e4SLinus Torvalds return retval;
23911da177e4SLinus Torvalds }
23921da177e4SLinus Torvalds
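/*
 * Editor's note - an illustrative userspace sketch, not part of the
 * source: the SysV path into shmem_lock() above is roughly
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *
 * which reaches shmem_lock(file, 1, ucounts) via shmctl_do_lock(),
 * charging the segment size against RLIMIT_MEMLOCK in user_shm_lock().
 */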
23939b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
23941da177e4SLinus Torvalds {
2395d09e8ca6SPasha Tatashin struct inode *inode = file_inode(file);
2396d09e8ca6SPasha Tatashin struct shmem_inode_info *info = SHMEM_I(inode);
239722247efdSPeter Xu int ret;
2398ab3948f5SJoel Fernandes (Google)
239922247efdSPeter Xu ret = seal_check_future_write(info->seals, vma);
240022247efdSPeter Xu if (ret)
240122247efdSPeter Xu return ret;
2402ab3948f5SJoel Fernandes (Google)
24031da177e4SLinus Torvalds file_accessed(file);
2404d09e8ca6SPasha Tatashin /* This is anonymous shared memory if it is unlinked at the time of mmap */
2405d09e8ca6SPasha Tatashin if (inode->i_nlink)
24061da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops;
2407d09e8ca6SPasha Tatashin else
2408d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops;
24091da177e4SLinus Torvalds return 0;
24101da177e4SLinus Torvalds }
24111da177e4SLinus Torvalds
2412e88e0d36SHugh Dickins static int shmem_file_open(struct inode *inode, struct file *file)
2413e88e0d36SHugh Dickins {
2414e88e0d36SHugh Dickins file->f_mode |= FMODE_CAN_ODIRECT;
2415e88e0d36SHugh Dickins return generic_file_open(inode, file);
2416e88e0d36SHugh Dickins }
2417e88e0d36SHugh Dickins
2418cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR
2419cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2420cb241339SHugh Dickins
2421cb241339SHugh Dickins /*
2422cb241339SHugh Dickins * chattr's fsflags are unrelated to extended attributes,
2423cb241339SHugh Dickins * but tmpfs has chosen to enable them under the same config option.
2424cb241339SHugh Dickins */
2425cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2426e408e695STheodore Ts'o {
2427cb241339SHugh Dickins unsigned int i_flags = 0;
2428cb241339SHugh Dickins
2429cb241339SHugh Dickins if (fsflags & FS_NOATIME_FL)
2430cb241339SHugh Dickins i_flags |= S_NOATIME;
2431cb241339SHugh Dickins if (fsflags & FS_APPEND_FL)
2432cb241339SHugh Dickins i_flags |= S_APPEND;
2433cb241339SHugh Dickins if (fsflags & FS_IMMUTABLE_FL)
2434cb241339SHugh Dickins i_flags |= S_IMMUTABLE;
2435cb241339SHugh Dickins /*
2436cb241339SHugh Dickins * But FS_NODUMP_FL does not require any action in i_flags.
2437cb241339SHugh Dickins */
2438cb241339SHugh Dickins inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2439e408e695STheodore Ts'o }
2440cb241339SHugh Dickins #else
2441cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2442cb241339SHugh Dickins {
2443cb241339SHugh Dickins }
2444cb241339SHugh Dickins #define shmem_initxattrs NULL
2445cb241339SHugh Dickins #endif
2446e408e695STheodore Ts'o
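/*
 * Editor's note, illustrative only: the fsflags translated above are the
 * ones chattr(1) manipulates through the FS_IOC_SETFLAGS ioctl, e.g.
 *
 *	int attr = FS_IMMUTABLE_FL | FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 *
 * which tmpfs services through its fileattr_set inode operation and
 * mirrors into i_flags here as S_IMMUTABLE | S_NOATIME.
 */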
2447a2e45955SChuck Lever static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2448a2e45955SChuck Lever {
2449a2e45955SChuck Lever return &SHMEM_I(inode)->dir_offsets;
2450a2e45955SChuck Lever }
2451a2e45955SChuck Lever
2452e09764cfSCarlos Maiolino static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2453e09764cfSCarlos Maiolino struct super_block *sb,
2454e09764cfSCarlos Maiolino struct inode *dir, umode_t mode,
2455e09764cfSCarlos Maiolino dev_t dev, unsigned long flags)
24561da177e4SLinus Torvalds {
24571da177e4SLinus Torvalds struct inode *inode;
24581da177e4SLinus Torvalds struct shmem_inode_info *info;
24591da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2460e809d5f0SChris Down ino_t ino;
246171480663SCarlos Maiolino int err;
24621da177e4SLinus Torvalds
246371480663SCarlos Maiolino err = shmem_reserve_inode(sb, &ino);
246471480663SCarlos Maiolino if (err)
246571480663SCarlos Maiolino return ERR_PTR(err);
246671480663SCarlos Maiolino
24671da177e4SLinus Torvalds
24681da177e4SLinus Torvalds inode = new_inode(sb);
246971480663SCarlos Maiolino if (!inode) {
24702daf18a7SHugh Dickins shmem_free_inode(sb, 0);
247171480663SCarlos Maiolino return ERR_PTR(-ENOSPC);
247271480663SCarlos Maiolino }
247371480663SCarlos Maiolino
2474e809d5f0SChris Down inode->i_ino = ino;
24757a80e5b8SGiuseppe Scrivano inode_init_owner(idmap, inode, dir, mode);
24761da177e4SLinus Torvalds inode->i_blocks = 0;
247765287334SJeff Layton inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
2478a251c17aSJason A. Donenfeld inode->i_generation = get_random_u32();
24791da177e4SLinus Torvalds info = SHMEM_I(inode);
24801da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info);
24811da177e4SLinus Torvalds spin_lock_init(&info->lock);
2482af53d3e9SHugh Dickins atomic_set(&info->stop_eviction, 0);
248340e041a2SDavid Herrmann info->seals = F_SEAL_SEAL;
24840b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE;
2485f7cd16a5SXavier Roche info->i_crtime = inode->i_mtime;
2486e408e695STheodore Ts'o info->fsflags = (dir == NULL) ? 0 :
2487e408e695STheodore Ts'o SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2488cb241339SHugh Dickins if (info->fsflags)
2489cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags);
2490779750d2SKirill A. Shutemov INIT_LIST_HEAD(&info->shrinklist);
24911da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist);
24932c6efe9cSLuis Chamberlain if (sbinfo->noswap)
24942c6efe9cSLuis Chamberlain mapping_set_unevictable(inode->i_mapping);
249538f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs);
249672c04902SAl Viro cache_no_acl(inode);
2497ff36da69SMatthew Wilcox (Oracle) mapping_set_large_folios(inode->i_mapping);
24981da177e4SLinus Torvalds
24991da177e4SLinus Torvalds switch (mode & S_IFMT) {
25001da177e4SLinus Torvalds default:
250139f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations;
25021da177e4SLinus Torvalds init_special_inode(inode, mode, dev);
25031da177e4SLinus Torvalds break;
25041da177e4SLinus Torvalds case S_IFREG:
250514fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops;
25061da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations;
25071da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations;
250871fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy,
250971fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo));
25101da177e4SLinus Torvalds break;
25111da177e4SLinus Torvalds case S_IFDIR:
2512d8c76e6fSDave Hansen inc_nlink(inode);
25131da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */
25141da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE;
25151da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations;
2516a2e45955SChuck Lever inode->i_fop = &simple_offset_dir_operations;
2517a2e45955SChuck Lever simple_offset_init(shmem_get_offset_ctx(inode));
25181da177e4SLinus Torvalds break;
25191da177e4SLinus Torvalds case S_IFLNK:
25201da177e4SLinus Torvalds /*
25211da177e4SLinus Torvalds * Must not load anything in the rbtree,
25221da177e4SLinus Torvalds * mpol_free_shared_policy will not be called.
25231da177e4SLinus Torvalds */
252471fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL);
25251da177e4SLinus Torvalds break;
25261da177e4SLinus Torvalds }
2527b45d71fbSJoel Fernandes (Google)
2528b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode);
25291da177e4SLinus Torvalds return inode;
25301da177e4SLinus Torvalds }
25311da177e4SLinus Torvalds
2532e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
2533e09764cfSCarlos Maiolino static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2534e09764cfSCarlos Maiolino struct super_block *sb, struct inode *dir,
2535e09764cfSCarlos Maiolino umode_t mode, dev_t dev, unsigned long flags)
2536e09764cfSCarlos Maiolino {
2537e09764cfSCarlos Maiolino int err;
2538e09764cfSCarlos Maiolino struct inode *inode;
2539e09764cfSCarlos Maiolino
2540e09764cfSCarlos Maiolino inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2541e09764cfSCarlos Maiolino if (IS_ERR(inode))
2542e09764cfSCarlos Maiolino return inode;
2543e09764cfSCarlos Maiolino
2544e09764cfSCarlos Maiolino err = dquot_initialize(inode);
2545e09764cfSCarlos Maiolino if (err)
2546e09764cfSCarlos Maiolino goto errout;
2547e09764cfSCarlos Maiolino
2548e09764cfSCarlos Maiolino err = dquot_alloc_inode(inode);
2549e09764cfSCarlos Maiolino if (err) {
2550e09764cfSCarlos Maiolino dquot_drop(inode);
2551e09764cfSCarlos Maiolino goto errout;
2552e09764cfSCarlos Maiolino }
2553e09764cfSCarlos Maiolino return inode;
2554e09764cfSCarlos Maiolino
2555e09764cfSCarlos Maiolino errout:
2556e09764cfSCarlos Maiolino inode->i_flags |= S_NOQUOTA;
2557e09764cfSCarlos Maiolino iput(inode);
2558e09764cfSCarlos Maiolino return ERR_PTR(err);
2559e09764cfSCarlos Maiolino }
2560e09764cfSCarlos Maiolino #else
2561e09764cfSCarlos Maiolino static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2562e09764cfSCarlos Maiolino struct super_block *sb, struct inode *dir,
2563e09764cfSCarlos Maiolino umode_t mode, dev_t dev, unsigned long flags)
2564e09764cfSCarlos Maiolino {
2565e09764cfSCarlos Maiolino return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2566e09764cfSCarlos Maiolino }
2567e09764cfSCarlos Maiolino #endif /* CONFIG_TMPFS_QUOTA */
2568e09764cfSCarlos Maiolino
25693460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
257061c50040SAxel Rasmussen int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
25714c27fe4cSMike Rapoport struct vm_area_struct *dst_vma,
25724c27fe4cSMike Rapoport unsigned long dst_addr,
25734c27fe4cSMike Rapoport unsigned long src_addr,
2574d9712937SAxel Rasmussen uffd_flags_t flags,
2575d7be6d7eSZhangPeng struct folio **foliop)
25764c27fe4cSMike Rapoport {
25774c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file);
25784c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode);
25794c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping;
25804c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping);
25814c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
25824c27fe4cSMike Rapoport void *page_kaddr;
2583b7dd44a1SMatthew Wilcox (Oracle) struct folio *folio;
25844c27fe4cSMike Rapoport int ret;
25853460f6e5SAxel Rasmussen pgoff_t max_off;
25864c27fe4cSMike Rapoport
2587c7e263abSLukas Czerner if (shmem_inode_acct_block(inode, 1)) {
25887ed9d238SAxel Rasmussen /*
25897ed9d238SAxel Rasmussen * We may have got a folio, returned -ENOENT triggering a retry,
25907ed9d238SAxel Rasmussen * and now we find ourselves with -ENOMEM. Release the folio, to
25917ed9d238SAxel Rasmussen * avoid a BUG_ON in our caller.
25927ed9d238SAxel Rasmussen */
2593d7be6d7eSZhangPeng if (unlikely(*foliop)) {
2594d7be6d7eSZhangPeng folio_put(*foliop);
2595d7be6d7eSZhangPeng *foliop = NULL;
25967ed9d238SAxel Rasmussen }
25977d64ae3aSAxel Rasmussen return -ENOMEM;
25987ed9d238SAxel Rasmussen }
25994c27fe4cSMike Rapoport
2600d7be6d7eSZhangPeng if (!*foliop) {
26017d64ae3aSAxel Rasmussen ret = -ENOMEM;
26027a7256d5SMatthew Wilcox (Oracle) folio = shmem_alloc_folio(gfp, info, pgoff);
26037a7256d5SMatthew Wilcox (Oracle) if (!folio)
26040f079694SMike Rapoport goto out_unacct_blocks;
26054c27fe4cSMike Rapoport
2606d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
26077a7256d5SMatthew Wilcox (Oracle) page_kaddr = kmap_local_folio(folio, 0);
26085dc21f0cSIra Weiny /*
26095dc21f0cSIra Weiny * The read mmap_lock is held here. Despite the
26105dc21f0cSIra Weiny * mmap_lock being read recursive a deadlock is still
26115dc21f0cSIra Weiny * possible if a writer has taken a lock. For example:
26125dc21f0cSIra Weiny *
26135dc21f0cSIra Weiny * process A thread 1 takes read lock on own mmap_lock
26145dc21f0cSIra Weiny * process A thread 2 calls mmap, blocks taking write lock
26155dc21f0cSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock
26165dc21f0cSIra Weiny * process B thread 2 calls mmap, blocks taking write lock
26175dc21f0cSIra Weiny * process A thread 1 blocks taking read lock on process B
26185dc21f0cSIra Weiny * process B thread 1 blocks taking read lock on process A
26195dc21f0cSIra Weiny *
26205dc21f0cSIra Weiny * Disable page faults to prevent potential deadlock
26215dc21f0cSIra Weiny * and retry the copy outside the mmap_lock.
26225dc21f0cSIra Weiny */
26235dc21f0cSIra Weiny pagefault_disable();
26248d103963SMike Rapoport ret = copy_from_user(page_kaddr,
26258d103963SMike Rapoport (const void __user *)src_addr,
26264c27fe4cSMike Rapoport PAGE_SIZE);
26275dc21f0cSIra Weiny pagefault_enable();
26287a7256d5SMatthew Wilcox (Oracle) kunmap_local(page_kaddr);
26294c27fe4cSMike Rapoport
2630c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */
26314c27fe4cSMike Rapoport if (unlikely(ret)) {
2632d7be6d7eSZhangPeng *foliop = folio;
26337d64ae3aSAxel Rasmussen ret = -ENOENT;
26344c27fe4cSMike Rapoport /* don't free the page */
26357d64ae3aSAxel Rasmussen goto out_unacct_blocks;
26364c27fe4cSMike Rapoport }
263719b482c2SMuchun Song
26387a7256d5SMatthew Wilcox (Oracle) flush_dcache_folio(folio);
26393460f6e5SAxel Rasmussen } else { /* ZEROPAGE */
26407a7256d5SMatthew Wilcox (Oracle) clear_user_highpage(&folio->page, dst_addr);
26418d103963SMike Rapoport }
26424c27fe4cSMike Rapoport } else {
2643d7be6d7eSZhangPeng folio = *foliop;
26447a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2645d7be6d7eSZhangPeng *foliop = NULL;
26464c27fe4cSMike Rapoport }
26474c27fe4cSMike Rapoport
26487a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_locked(folio));
26497a7256d5SMatthew Wilcox (Oracle) VM_BUG_ON(folio_test_swapbacked(folio));
26507a7256d5SMatthew Wilcox (Oracle) __folio_set_locked(folio);
26517a7256d5SMatthew Wilcox (Oracle) __folio_set_swapbacked(folio);
26527a7256d5SMatthew Wilcox (Oracle) __folio_mark_uptodate(folio);
26539cc90c66SAndrea Arcangeli
2654e2a50c1fSAndrea Arcangeli ret = -EFAULT;
2655e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
26563460f6e5SAxel Rasmussen if (unlikely(pgoff >= max_off))
2657e2a50c1fSAndrea Arcangeli goto out_release;
2658e2a50c1fSAndrea Arcangeli
2659b7dd44a1SMatthew Wilcox (Oracle) ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
266061c50040SAxel Rasmussen gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
26614c27fe4cSMike Rapoport if (ret)
26624c27fe4cSMike Rapoport goto out_release;
26634c27fe4cSMike Rapoport
266461c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2665d9712937SAxel Rasmussen &folio->page, true, flags);
26667d64ae3aSAxel Rasmussen if (ret)
26677d64ae3aSAxel Rasmussen goto out_delete_from_cache;
26684c27fe4cSMike Rapoport
26693c1b7528SHugh Dickins shmem_recalc_inode(inode, 1, 0);
26707a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio);
26717d64ae3aSAxel Rasmussen return 0;
26727d64ae3aSAxel Rasmussen out_delete_from_cache:
26737a7256d5SMatthew Wilcox (Oracle) filemap_remove_folio(folio);
26744c27fe4cSMike Rapoport out_release:
26757a7256d5SMatthew Wilcox (Oracle) folio_unlock(folio);
26767a7256d5SMatthew Wilcox (Oracle) folio_put(folio);
26774c27fe4cSMike Rapoport out_unacct_blocks:
26780f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1);
26797d64ae3aSAxel Rasmussen return ret;
26804c27fe4cSMike Rapoport }
26813460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
26828d103963SMike Rapoport
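/*
 * Editor's note - an illustrative summary of the *foliop contract above,
 * not authoritative: if copy_from_user() faults while mmap_lock is held,
 * the newly allocated folio is parked in *foliop and -ENOENT is returned;
 * the mfill_atomic() caller in mm/userfaultfd.c then drops mmap_lock,
 * copies src_addr into that folio, retakes the lock and calls back in,
 * roughly:
 *
 *	struct folio *folio = NULL;
 *	err = shmem_mfill_atomic_pte(..., &folio);
 *	if (err == -ENOENT) {
 *		// copy the user data into folio outside mmap_lock,
 *		// then retry with *foliop still pointing at it
 *	}
 */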
26831da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
268492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
268569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
26861da177e4SLinus Torvalds
26871da177e4SLinus Torvalds static int
2688800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
26899d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len,
2690800d15a5SNick Piggin struct page **pagep, void **fsdata)
26911da177e4SLinus Torvalds {
2692800d15a5SNick Piggin struct inode *inode = mapping->host;
269340e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode);
269409cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT;
2695eff1f906SMatthew Wilcox (Oracle) struct folio *folio;
2696a7605426SYang Shi int ret = 0;
269740e041a2SDavid Herrmann
26989608703eSJan Kara /* i_rwsem is held by caller */
2699ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW |
2700ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2701ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
270240e041a2SDavid Herrmann return -EPERM;
270340e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
270440e041a2SDavid Herrmann return -EPERM;
270540e041a2SDavid Herrmann }
270640e041a2SDavid Herrmann
2707eff1f906SMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2708a7605426SYang Shi
2709a7605426SYang Shi if (ret)
2710a7605426SYang Shi return ret;
2711a7605426SYang Shi
2712eff1f906SMatthew Wilcox (Oracle) *pagep = folio_file_page(folio, index);
2713a7605426SYang Shi if (PageHWPoison(*pagep)) {
2714eff1f906SMatthew Wilcox (Oracle) folio_unlock(folio);
2715eff1f906SMatthew Wilcox (Oracle) folio_put(folio);
2716a7605426SYang Shi *pagep = NULL;
2717a7605426SYang Shi return -EIO;
2718a7605426SYang Shi }
2719a7605426SYang Shi
2720a7605426SYang Shi return 0;
2721800d15a5SNick Piggin }
2722800d15a5SNick Piggin
2723800d15a5SNick Piggin static int
2724800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2725800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied,
2726800d15a5SNick Piggin struct page *page, void *fsdata)
2727800d15a5SNick Piggin {
272869bbb87bSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
2729800d15a5SNick Piggin struct inode *inode = mapping->host;
2730800d15a5SNick Piggin
2731800d15a5SNick Piggin if (pos + copied > inode->i_size)
2732800d15a5SNick Piggin i_size_write(inode, pos + copied);
2733800d15a5SNick Piggin
273469bbb87bSMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
273569bbb87bSMatthew Wilcox (Oracle) if (copied < folio_size(folio)) {
273669bbb87bSMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos);
273769bbb87bSMatthew Wilcox (Oracle) folio_zero_segments(folio, 0, from,
273869bbb87bSMatthew Wilcox (Oracle) from + copied, folio_size(folio));
2739800d8c63SKirill A. Shutemov }
274069bbb87bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
2741800d8c63SKirill A. Shutemov }
274269bbb87bSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
274369bbb87bSMatthew Wilcox (Oracle) folio_unlock(folio);
274469bbb87bSMatthew Wilcox (Oracle) folio_put(folio);
2745d3602444SHugh Dickins
2746800d15a5SNick Piggin return copied;
27471da177e4SLinus Torvalds }
27481da177e4SLinus Torvalds
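/*
 * Editor's note, illustrative only: the two hooks above are driven by
 * generic_perform_write() (invoked from shmem_file_write_iter() below),
 * whose per-page loop is roughly:
 *
 *	status = a_ops->write_begin(file, mapping, pos, bytes, &page, &fsdata);
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 */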
27492ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
27501da177e4SLinus Torvalds {
27516e58e79dSAl Viro struct file *file = iocb->ki_filp;
27526e58e79dSAl Viro struct inode *inode = file_inode(file);
27531da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
275441ffe5d5SHugh Dickins pgoff_t index;
275541ffe5d5SHugh Dickins unsigned long offset;
2756f7c1d074SGeert Uytterhoeven int error = 0;
2757cb66a7a1SAl Viro ssize_t retval = 0;
27586e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos;
2759a0ee5ec5SHugh Dickins
276009cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT;
276109cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK;
27621da177e4SLinus Torvalds
27631da177e4SLinus Torvalds for (;;) {
27644601e2fcSMatthew Wilcox (Oracle) struct folio *folio = NULL;
27651da177e4SLinus Torvalds struct page *page = NULL;
276641ffe5d5SHugh Dickins pgoff_t end_index;
276741ffe5d5SHugh Dickins unsigned long nr, ret;
27681da177e4SLinus Torvalds loff_t i_size = i_size_read(inode);
27691da177e4SLinus Torvalds
277009cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT;
27711da177e4SLinus Torvalds if (index > end_index)
27721da177e4SLinus Torvalds break;
27731da177e4SLinus Torvalds if (index == end_index) {
277409cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK;
27751da177e4SLinus Torvalds if (nr <= offset)
27761da177e4SLinus Torvalds break;
27771da177e4SLinus Torvalds }
27781da177e4SLinus Torvalds
27794601e2fcSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio, SGP_READ);
27806e58e79dSAl Viro if (error) {
27816e58e79dSAl Viro if (error == -EINVAL)
27826e58e79dSAl Viro error = 0;
27831da177e4SLinus Torvalds break;
27841da177e4SLinus Torvalds }
27854601e2fcSMatthew Wilcox (Oracle) if (folio) {
27864601e2fcSMatthew Wilcox (Oracle) folio_unlock(folio);
2787a7605426SYang Shi
27884601e2fcSMatthew Wilcox (Oracle) page = folio_file_page(folio, index);
2789a7605426SYang Shi if (PageHWPoison(page)) {
27904601e2fcSMatthew Wilcox (Oracle) folio_put(folio);
2791a7605426SYang Shi error = -EIO;
2792a7605426SYang Shi break;
2793a7605426SYang Shi }
279475edd345SHugh Dickins }
27951da177e4SLinus Torvalds
27961da177e4SLinus Torvalds /*
27971da177e4SLinus Torvalds * We must re-evaluate i_size after getting the folio, since reads
27989608703eSJan Kara * (unlike writes) are called without i_rwsem protection against truncate
27991da177e4SLinus Torvalds */
280009cbfeafSKirill A. Shutemov nr = PAGE_SIZE;
28011da177e4SLinus Torvalds i_size = i_size_read(inode);
280209cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT;
28031da177e4SLinus Torvalds if (index == end_index) {
280409cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK;
28051da177e4SLinus Torvalds if (nr <= offset) {
28064601e2fcSMatthew Wilcox (Oracle) if (folio)
28074601e2fcSMatthew Wilcox (Oracle) folio_put(folio);
28081da177e4SLinus Torvalds break;
28091da177e4SLinus Torvalds }
28101da177e4SLinus Torvalds }
28111da177e4SLinus Torvalds nr -= offset;
28121da177e4SLinus Torvalds
28134601e2fcSMatthew Wilcox (Oracle) if (folio) {
28141da177e4SLinus Torvalds /*
28151da177e4SLinus Torvalds * If users can be writing to this page using arbitrary
28161da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing
28171da177e4SLinus Torvalds * before reading the page on the kernel side.
28181da177e4SLinus Torvalds */
28191da177e4SLinus Torvalds if (mapping_writably_mapped(mapping))
28201da177e4SLinus Torvalds flush_dcache_page(page);
28211da177e4SLinus Torvalds /*
28221da177e4SLinus Torvalds * Mark the page accessed if we read the beginning.
28231da177e4SLinus Torvalds */
28241da177e4SLinus Torvalds if (!offset)
28254601e2fcSMatthew Wilcox (Oracle) folio_mark_accessed(folio);
28261da177e4SLinus Torvalds /*
28271da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so
28281da177e4SLinus Torvalds * now we can copy it to user space...
28291da177e4SLinus Torvalds */
28302ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to);
28314601e2fcSMatthew Wilcox (Oracle) folio_put(folio);
28321bdec44bSHugh Dickins
2833fcb14cb1SAl Viro } else if (user_backed_iter(to)) {
28341bdec44bSHugh Dickins /*
28351bdec44bSHugh Dickins * Copy to user tends to be so well optimized, but
28361bdec44bSHugh Dickins * clear_user() not so much, that it is noticeably
28371bdec44bSHugh Dickins * faster to copy the zero page instead of clearing.
28381bdec44bSHugh Dickins */
28391bdec44bSHugh Dickins ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
28401bdec44bSHugh Dickins } else {
28411bdec44bSHugh Dickins /*
28421bdec44bSHugh Dickins * But submitting the same page twice in a row to
28431bdec44bSHugh Dickins * splice() - or others? - can result in confusion:
28441bdec44bSHugh Dickins * so don't attempt that optimization on pipes etc.
28451bdec44bSHugh Dickins */
28461bdec44bSHugh Dickins ret = iov_iter_zero(nr, to);
28471bdec44bSHugh Dickins }
28481bdec44bSHugh Dickins
28496e58e79dSAl Viro retval += ret;
28501da177e4SLinus Torvalds offset += ret;
285109cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT;
285209cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK;
28531da177e4SLinus Torvalds
28542ba5bbedSAl Viro if (!iov_iter_count(to))
28551da177e4SLinus Torvalds break;
28566e58e79dSAl Viro if (ret < nr) {
28576e58e79dSAl Viro error = -EFAULT;
28586e58e79dSAl Viro break;
28596e58e79dSAl Viro }
28601da177e4SLinus Torvalds cond_resched();
28611da177e4SLinus Torvalds }
28621da177e4SLinus Torvalds
286309cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
28646e58e79dSAl Viro file_accessed(file);
28656e58e79dSAl Viro return retval ? retval : error;
28661da177e4SLinus Torvalds }
28671da177e4SLinus Torvalds
2868e88e0d36SHugh Dickins static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2869e88e0d36SHugh Dickins {
2870e88e0d36SHugh Dickins struct file *file = iocb->ki_filp;
2871e88e0d36SHugh Dickins struct inode *inode = file->f_mapping->host;
2872e88e0d36SHugh Dickins ssize_t ret;
2873e88e0d36SHugh Dickins
2874e88e0d36SHugh Dickins inode_lock(inode);
2875e88e0d36SHugh Dickins ret = generic_write_checks(iocb, from);
2876e88e0d36SHugh Dickins if (ret <= 0)
2877e88e0d36SHugh Dickins goto unlock;
2878e88e0d36SHugh Dickins ret = file_remove_privs(file);
2879e88e0d36SHugh Dickins if (ret)
2880e88e0d36SHugh Dickins goto unlock;
2881e88e0d36SHugh Dickins ret = file_update_time(file);
2882e88e0d36SHugh Dickins if (ret)
2883e88e0d36SHugh Dickins goto unlock;
2884e88e0d36SHugh Dickins ret = generic_perform_write(iocb, from);
2885e88e0d36SHugh Dickins unlock:
2886e88e0d36SHugh Dickins inode_unlock(inode);
2887e88e0d36SHugh Dickins return ret;
2888e88e0d36SHugh Dickins }
2889e88e0d36SHugh Dickins
2890bd194b18SDavid Howells static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2891bd194b18SDavid Howells struct pipe_buffer *buf)
2892bd194b18SDavid Howells {
2893bd194b18SDavid Howells return true;
2894bd194b18SDavid Howells }
2895bd194b18SDavid Howells
2896bd194b18SDavid Howells static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2897bd194b18SDavid Howells struct pipe_buffer *buf)
2898bd194b18SDavid Howells {
2899bd194b18SDavid Howells }
2900bd194b18SDavid Howells
2901bd194b18SDavid Howells static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
2902bd194b18SDavid Howells struct pipe_buffer *buf)
2903bd194b18SDavid Howells {
2904bd194b18SDavid Howells return false;
2905bd194b18SDavid Howells }
2906bd194b18SDavid Howells
2907bd194b18SDavid Howells static const struct pipe_buf_operations zero_pipe_buf_ops = {
2908bd194b18SDavid Howells .release = zero_pipe_buf_release,
2909bd194b18SDavid Howells .try_steal = zero_pipe_buf_try_steal,
2910bd194b18SDavid Howells .get = zero_pipe_buf_get,
2911bd194b18SDavid Howells };
2912bd194b18SDavid Howells
2913bd194b18SDavid Howells static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
2914bd194b18SDavid Howells loff_t fpos, size_t size)
2915bd194b18SDavid Howells {
2916bd194b18SDavid Howells size_t offset = fpos & ~PAGE_MASK;
2917bd194b18SDavid Howells
2918bd194b18SDavid Howells size = min_t(size_t, size, PAGE_SIZE - offset);
2919bd194b18SDavid Howells
2920bd194b18SDavid Howells if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2921bd194b18SDavid Howells struct pipe_buffer *buf = pipe_head_buf(pipe);
2922bd194b18SDavid Howells
2923bd194b18SDavid Howells *buf = (struct pipe_buffer) {
2924bd194b18SDavid Howells .ops = &zero_pipe_buf_ops,
2925bd194b18SDavid Howells .page = ZERO_PAGE(0),
2926bd194b18SDavid Howells .offset = offset,
2927bd194b18SDavid Howells .len = size,
2928bd194b18SDavid Howells };
2929bd194b18SDavid Howells pipe->head++;
2930bd194b18SDavid Howells }
2931bd194b18SDavid Howells
2932bd194b18SDavid Howells return size;
2933bd194b18SDavid Howells }
2934bd194b18SDavid Howells
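/*
 * Editor's note - a worked example, illustrative only: for fpos = 0x1234,
 * offset = 0x234, so at most PAGE_SIZE - 0x234 bytes of ZERO_PAGE(0) are
 * attached to the pipe.  The no-op get/release ops above are safe because
 * the shared zero page is never freed, so no reference needs to be held.
 */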
2935bd194b18SDavid Howells static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2936bd194b18SDavid Howells struct pipe_inode_info *pipe,
2937bd194b18SDavid Howells size_t len, unsigned int flags)
2938bd194b18SDavid Howells {
2939bd194b18SDavid Howells struct inode *inode = file_inode(in);
2940bd194b18SDavid Howells struct address_space *mapping = inode->i_mapping;
2941bd194b18SDavid Howells struct folio *folio = NULL;
2942bd194b18SDavid Howells size_t total_spliced = 0, used, npages, n, part;
2943bd194b18SDavid Howells loff_t isize;
2944bd194b18SDavid Howells int error = 0;
2945bd194b18SDavid Howells
2946bd194b18SDavid Howells /* Work out how much data we can actually add into the pipe */
2947bd194b18SDavid Howells used = pipe_occupancy(pipe->head, pipe->tail);
2948bd194b18SDavid Howells npages = max_t(ssize_t, pipe->max_usage - used, 0);
2949bd194b18SDavid Howells len = min_t(size_t, len, npages * PAGE_SIZE);
2950bd194b18SDavid Howells
2951bd194b18SDavid Howells do {
2952bd194b18SDavid Howells if (*ppos >= i_size_read(inode))
2953bd194b18SDavid Howells break;
2954bd194b18SDavid Howells
2955fa598952SHugh Dickins error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2956fa598952SHugh Dickins SGP_READ);
2957bd194b18SDavid Howells if (error) {
2958bd194b18SDavid Howells if (error == -EINVAL)
2959bd194b18SDavid Howells error = 0;
2960bd194b18SDavid Howells break;
2961bd194b18SDavid Howells }
2962bd194b18SDavid Howells if (folio) {
2963bd194b18SDavid Howells folio_unlock(folio);
2964bd194b18SDavid Howells
2965fa598952SHugh Dickins if (folio_test_hwpoison(folio) ||
2966fa598952SHugh Dickins (folio_test_large(folio) &&
2967fa598952SHugh Dickins folio_test_has_hwpoisoned(folio))) {
2968bd194b18SDavid Howells error = -EIO;
2969bd194b18SDavid Howells break;
2970bd194b18SDavid Howells }
2971bd194b18SDavid Howells }
2972bd194b18SDavid Howells
2973bd194b18SDavid Howells /*
2974bd194b18SDavid Howells * i_size must be checked after we know the pages are Uptodate.
2975bd194b18SDavid Howells *
2976bd194b18SDavid Howells * Checking i_size after the uptodate check allows us to calculate
2977bd194b18SDavid Howells * the correct value for "part", which means the zero-filled
2978bd194b18SDavid Howells * part of the page is not copied back to userspace (unless
2979bd194b18SDavid Howells * another truncate extends the file - this is desired though).
2980bd194b18SDavid Howells */
2981bd194b18SDavid Howells isize = i_size_read(inode);
2982bd194b18SDavid Howells if (unlikely(*ppos >= isize))
2983bd194b18SDavid Howells break;
2984bd194b18SDavid Howells part = min_t(loff_t, isize - *ppos, len);
2985bd194b18SDavid Howells
2986bd194b18SDavid Howells if (folio) {
2987bd194b18SDavid Howells /*
2988bd194b18SDavid Howells * If users can be writing to this page using arbitrary
2989bd194b18SDavid Howells * virtual addresses, take care about potential aliasing
2990bd194b18SDavid Howells * before reading the page on the kernel side.
2991bd194b18SDavid Howells */
2992bd194b18SDavid Howells if (mapping_writably_mapped(mapping))
2993bd194b18SDavid Howells flush_dcache_folio(folio);
2994bd194b18SDavid Howells folio_mark_accessed(folio);
2995bd194b18SDavid Howells /*
2996bd194b18SDavid Howells * Ok, we have the page, and it's up-to-date, so we can
2997bd194b18SDavid Howells * now splice it into the pipe.
2998bd194b18SDavid Howells */
2999bd194b18SDavid Howells n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3000bd194b18SDavid Howells folio_put(folio);
3001bd194b18SDavid Howells folio = NULL;
3002bd194b18SDavid Howells } else {
3003fa598952SHugh Dickins n = splice_zeropage_into_pipe(pipe, *ppos, part);
3004bd194b18SDavid Howells }
3005bd194b18SDavid Howells
3006bd194b18SDavid Howells if (!n)
3007bd194b18SDavid Howells break;
3008bd194b18SDavid Howells len -= n;
3009bd194b18SDavid Howells total_spliced += n;
3010bd194b18SDavid Howells *ppos += n;
3011bd194b18SDavid Howells in->f_ra.prev_pos = *ppos;
3012bd194b18SDavid Howells if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3013bd194b18SDavid Howells break;
3014bd194b18SDavid Howells
3015bd194b18SDavid Howells cond_resched();
3016bd194b18SDavid Howells } while (len);
3017bd194b18SDavid Howells
3018bd194b18SDavid Howells if (folio)
3019bd194b18SDavid Howells folio_put(folio);
3020bd194b18SDavid Howells
3021bd194b18SDavid Howells file_accessed(in);
3022bd194b18SDavid Howells return total_spliced ? total_spliced : error;
3023bd194b18SDavid Howells }
3024bd194b18SDavid Howells
3025965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3026220f2ac9SHugh Dickins {
3027220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping;
3028220f2ac9SHugh Dickins struct inode *inode = mapping->host;
3029220f2ac9SHugh Dickins
3030965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE)
3031965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence,
3032220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode));
303341139aa4SMatthew Wilcox (Oracle) if (offset < 0)
303441139aa4SMatthew Wilcox (Oracle) return -ENXIO;
303541139aa4SMatthew Wilcox (Oracle)
30365955102cSAl Viro inode_lock(inode);
30379608703eSJan Kara /* We're holding i_rwsem so we can access i_size directly */
303841139aa4SMatthew Wilcox (Oracle) offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3039387aae6fSHugh Dickins if (offset >= 0)
304046a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
30415955102cSAl Viro inode_unlock(inode);
3042220f2ac9SHugh Dickins return offset;
3043220f2ac9SHugh Dickins }
3044220f2ac9SHugh Dickins
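/*
 * Editor's note, illustrative only: a userspace probe such as
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *
 * is answered above straight from the page cache by
 * mapping_seek_hole_data(), which works for tmpfs because all data lives
 * in the cache or in swap entries indexed there - no block map to consult.
 */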
304583e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
304683e4fa9cSHugh Dickins loff_t len)
304783e4fa9cSHugh Dickins {
3048496ad9aaSAl Viro struct inode *inode = file_inode(file);
3049e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
305040e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode);
30511aac1400SHugh Dickins struct shmem_falloc shmem_falloc;
3052d144bf62SHugh Dickins pgoff_t start, index, end, undo_fallocend;
3053e2d12e22SHugh Dickins int error;
305483e4fa9cSHugh Dickins
305513ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
305613ace4d0SHugh Dickins return -EOPNOTSUPP;
305713ace4d0SHugh Dickins
30585955102cSAl Viro inode_lock(inode);
305983e4fa9cSHugh Dickins
306083e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) {
306183e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping;
306283e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE);
306383e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
30648e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
306583e4fa9cSHugh Dickins
30669608703eSJan Kara /* protected by i_rwsem */
3067ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
306840e041a2SDavid Herrmann error = -EPERM;
306940e041a2SDavid Herrmann goto out;
307040e041a2SDavid Herrmann }
307140e041a2SDavid Herrmann
30728e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq;
3073aa71ecd8SChen Jun shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3074f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3075f00cdc6dSHugh Dickins spin_lock(&inode->i_lock);
3076f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc;
3077f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock);
3078f00cdc6dSHugh Dickins
307983e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start)
308083e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start,
308183e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0);
308283e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1);
308383e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */
30848e205f77SHugh Dickins
30858e205f77SHugh Dickins spin_lock(&inode->i_lock);
30868e205f77SHugh Dickins inode->i_private = NULL;
30878e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq);
30882055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
30898e205f77SHugh Dickins spin_unlock(&inode->i_lock);
309083e4fa9cSHugh Dickins error = 0;
30918e205f77SHugh Dickins goto out;
309283e4fa9cSHugh Dickins }
309383e4fa9cSHugh Dickins
3094e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3095e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len);
3096e2d12e22SHugh Dickins if (error)
3097e2d12e22SHugh Dickins goto out;
3098e2d12e22SHugh Dickins
309940e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
310040e041a2SDavid Herrmann error = -EPERM;
310140e041a2SDavid Herrmann goto out;
310240e041a2SDavid Herrmann }
310340e041a2SDavid Herrmann
310409cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT;
310509cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3106e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */
3107e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3108e2d12e22SHugh Dickins error = -ENOSPC;
3109e2d12e22SHugh Dickins goto out;
3110e2d12e22SHugh Dickins }
3111e2d12e22SHugh Dickins
31128e205f77SHugh Dickins shmem_falloc.waitq = NULL;
31131aac1400SHugh Dickins shmem_falloc.start = start;
31141aac1400SHugh Dickins shmem_falloc.next = start;
31151aac1400SHugh Dickins shmem_falloc.nr_falloced = 0;
31161aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0;
31171aac1400SHugh Dickins spin_lock(&inode->i_lock);
31181aac1400SHugh Dickins inode->i_private = &shmem_falloc;
31191aac1400SHugh Dickins spin_unlock(&inode->i_lock);
31201aac1400SHugh Dickins
3121d144bf62SHugh Dickins /*
3122d144bf62SHugh Dickins * info->fallocend is only relevant when huge pages might be
3123d144bf62SHugh Dickins * involved: to prevent split_huge_page() freeing fallocated
3124d144bf62SHugh Dickins * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3125d144bf62SHugh Dickins */
3126d144bf62SHugh Dickins undo_fallocend = info->fallocend;
3127d144bf62SHugh Dickins if (info->fallocend < end)
3128d144bf62SHugh Dickins info->fallocend = end;
3129d144bf62SHugh Dickins
3130050dcb5cSHugh Dickins for (index = start; index < end; ) {
3131b0802b22SMatthew Wilcox (Oracle) struct folio *folio;
3132e2d12e22SHugh Dickins
3133e2d12e22SHugh Dickins /*
3134e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have
3135e2d12e22SHugh Dickins * been interrupted because we are using up too much memory.
3136e2d12e22SHugh Dickins */
3137e2d12e22SHugh Dickins if (signal_pending(current))
3138e2d12e22SHugh Dickins error = -EINTR;
31391aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
31401aac1400SHugh Dickins error = -ENOMEM;
3141e2d12e22SHugh Dickins else
3142b0802b22SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, index, &folio,
3143b0802b22SMatthew Wilcox (Oracle) SGP_FALLOC);
3144e2d12e22SHugh Dickins if (error) {
3145d144bf62SHugh Dickins info->fallocend = undo_fallocend;
3146b0802b22SMatthew Wilcox (Oracle) /* Remove the !uptodate folios we added */
31477f556567SHugh Dickins if (index > start) {
31481635f6a7SHugh Dickins shmem_undo_range(inode,
314909cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT,
3150b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true);
31517f556567SHugh Dickins }
31521aac1400SHugh Dickins goto undone;
3153e2d12e22SHugh Dickins }
3154e2d12e22SHugh Dickins
3155050dcb5cSHugh Dickins /*
3156050dcb5cSHugh Dickins * Here is a more important optimization than it appears:
3157b0802b22SMatthew Wilcox (Oracle) * a second SGP_FALLOC on the same large folio will clear it,
3158b0802b22SMatthew Wilcox (Oracle) * making it uptodate and un-undoable if we fail later.
3159050dcb5cSHugh Dickins */
3160b0802b22SMatthew Wilcox (Oracle) index = folio_next_index(folio);
3161050dcb5cSHugh Dickins /* Beware 32-bit wraparound */
3162050dcb5cSHugh Dickins if (!index)
3163050dcb5cSHugh Dickins index--;
3164050dcb5cSHugh Dickins
3165e2d12e22SHugh Dickins /*
31661aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached.
31671aac1400SHugh Dickins * No need for lock or barrier: we have the page lock.
31681aac1400SHugh Dickins */
3169b0802b22SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
3170050dcb5cSHugh Dickins shmem_falloc.nr_falloced += index - shmem_falloc.next;
3171050dcb5cSHugh Dickins shmem_falloc.next = index;
31721aac1400SHugh Dickins
31731aac1400SHugh Dickins /*
3174b0802b22SMatthew Wilcox (Oracle) * If !uptodate, leave it that way so that freeable folios
31751635f6a7SHugh Dickins * can be recognized if we need to rollback on error later.
3176b0802b22SMatthew Wilcox (Oracle) * But mark it dirty so that memory pressure will swap rather
3177b0802b22SMatthew Wilcox (Oracle) * than free the folios we are allocating (and SGP_CACHE folios
3178e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too).
3179e2d12e22SHugh Dickins */
3180b0802b22SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
3181b0802b22SMatthew Wilcox (Oracle) folio_unlock(folio);
3182b0802b22SMatthew Wilcox (Oracle) folio_put(folio);
3183e2d12e22SHugh Dickins cond_resched();
3184e2d12e22SHugh Dickins }
3185e2d12e22SHugh Dickins
3186e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3187e2d12e22SHugh Dickins i_size_write(inode, offset + len);
31881aac1400SHugh Dickins undone:
31891aac1400SHugh Dickins spin_lock(&inode->i_lock);
31901aac1400SHugh Dickins inode->i_private = NULL;
31911aac1400SHugh Dickins spin_unlock(&inode->i_lock);
3192e2d12e22SHugh Dickins out:
319315f242bbSHugh Dickins if (!error)
319415f242bbSHugh Dickins file_modified(file);
31955955102cSAl Viro inode_unlock(inode);
319683e4fa9cSHugh Dickins return error;
319783e4fa9cSHugh Dickins }
319883e4fa9cSHugh Dickins
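/*
 * Usage sketch (userspace, illustrative; not part of this file).
 * Assumes fd is an open tmpfs file. Plain preallocation takes the
 * SGP_FALLOC loop above; hole-punching takes the FALLOC_FL_PUNCH_HOLE
 * branch and must be paired with FALLOC_FL_KEEP_SIZE, as fallocate(2)
 * requires.
 *
 *	fallocate(fd, 0, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */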
3199726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
32001da177e4SLinus Torvalds {
3201726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
32021da177e4SLinus Torvalds
32031da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC;
320409cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE;
32051da177e4SLinus Torvalds buf->f_namelen = NAME_MAX;
32060edd73b3SHugh Dickins if (sbinfo->max_blocks) {
32071da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks;
320841ffe5d5SHugh Dickins buf->f_bavail =
320941ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks -
321041ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks);
32110edd73b3SHugh Dickins }
32120edd73b3SHugh Dickins if (sbinfo->max_inodes) {
32131da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes;
3214e07c469eSHugh Dickins buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
32151da177e4SLinus Torvalds }
32161da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */
321759cda49eSAmir Goldstein
321859cda49eSAmir Goldstein buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
321959cda49eSAmir Goldstein
32201da177e4SLinus Torvalds return 0;
32211da177e4SLinus Torvalds }
32221da177e4SLinus Torvalds
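/*
 * Usage sketch (userspace, illustrative; not part of this file),
 * assuming a tmpfs is mounted at /dev/shm: the block counts reported
 * above are in f_bsize (PAGE_SIZE) units, and stay zero when the
 * mount was created without size= or nr_inodes= limits.
 *
 *	struct statfs st;
 *	if (statfs("/dev/shm", &st) == 0 && st.f_type == TMPFS_MAGIC)
 *		printf("%llu of %llu blocks free\n",
 *		       (unsigned long long)st.f_bfree,
 *		       (unsigned long long)st.f_blocks);
 */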
32231da177e4SLinus Torvalds /*
32241da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.
32251da177e4SLinus Torvalds */
32261da177e4SLinus Torvalds static int
32275ebb29beSChristian Brauner shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3228549c7297SChristian Brauner struct dentry *dentry, umode_t mode, dev_t dev)
32291da177e4SLinus Torvalds {
32300b0a0806SHugh Dickins struct inode *inode;
323171480663SCarlos Maiolino int error;
32321da177e4SLinus Torvalds
32337a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
323471480663SCarlos Maiolino if (IS_ERR(inode))
323571480663SCarlos Maiolino return PTR_ERR(inode);
323671480663SCarlos Maiolino
3237feda821eSChristoph Hellwig error = simple_acl_create(dir, inode);
3238feda821eSChristoph Hellwig if (error)
3239feda821eSChristoph Hellwig goto out_iput;
32402a7dba39SEric Paris error = security_inode_init_security(inode, dir,
32419d8f13baSMimi Zohar &dentry->d_name,
32426d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL);
3243feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP)
3244feda821eSChristoph Hellwig goto out_iput;
324537ec43cdSMimi Zohar
3246a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3247a2e45955SChuck Lever if (error)
3248a2e45955SChuck Lever goto out_iput;
3249a2e45955SChuck Lever
32501da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE;
325165287334SJeff Layton dir->i_mtime = inode_set_ctime_current(dir);
325236f05cabSJeff Layton inode_inc_iversion(dir);
32531da177e4SLinus Torvalds d_instantiate(dentry, inode);
32541da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */
32551da177e4SLinus Torvalds return error;
325671480663SCarlos Maiolino
3257feda821eSChristoph Hellwig out_iput:
3258feda821eSChristoph Hellwig iput(inode);
3259feda821eSChristoph Hellwig return error;
32601da177e4SLinus Torvalds }
32611da177e4SLinus Torvalds
326260545d0dSAl Viro static int
3263011e2b71SChristian Brauner shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3264863f144fSMiklos Szeredi struct file *file, umode_t mode)
326560545d0dSAl Viro {
326660545d0dSAl Viro struct inode *inode;
326771480663SCarlos Maiolino int error;
326860545d0dSAl Viro
32697a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
327071480663SCarlos Maiolino
327171480663SCarlos Maiolino if (IS_ERR(inode)) {
327271480663SCarlos Maiolino error = PTR_ERR(inode);
327371480663SCarlos Maiolino goto err_out;
327471480663SCarlos Maiolino }
327571480663SCarlos Maiolino
327660545d0dSAl Viro error = security_inode_init_security(inode, dir,
327760545d0dSAl Viro NULL,
327860545d0dSAl Viro shmem_initxattrs, NULL);
3279feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP)
3280feda821eSChristoph Hellwig goto out_iput;
3281feda821eSChristoph Hellwig error = simple_acl_create(dir, inode);
3282feda821eSChristoph Hellwig if (error)
3283feda821eSChristoph Hellwig goto out_iput;
3284863f144fSMiklos Szeredi d_tmpfile(file, inode);
328571480663SCarlos Maiolino
328671480663SCarlos Maiolino err_out:
3287863f144fSMiklos Szeredi return finish_open_simple(file, error);
3288feda821eSChristoph Hellwig out_iput:
3289feda821eSChristoph Hellwig iput(inode);
3290feda821eSChristoph Hellwig return error;
329160545d0dSAl Viro }
329260545d0dSAl Viro
3293c54bd91eSChristian Brauner static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3294549c7297SChristian Brauner struct dentry *dentry, umode_t mode)
32951da177e4SLinus Torvalds {
32961da177e4SLinus Torvalds int error;
32971da177e4SLinus Torvalds
32987a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
32997a80e5b8SGiuseppe Scrivano if (error)
33001da177e4SLinus Torvalds return error;
3301d8c76e6fSDave Hansen inc_nlink(dir);
33021da177e4SLinus Torvalds return 0;
33031da177e4SLinus Torvalds }
33041da177e4SLinus Torvalds
33056c960e68SChristian Brauner static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3306549c7297SChristian Brauner struct dentry *dentry, umode_t mode, bool excl)
33071da177e4SLinus Torvalds {
33087a80e5b8SGiuseppe Scrivano return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
33091da177e4SLinus Torvalds }
33101da177e4SLinus Torvalds
33111da177e4SLinus Torvalds /*
33121da177e4SLinus Torvalds * Link a file.
33131da177e4SLinus Torvalds */
33141da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
33151da177e4SLinus Torvalds {
331675c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry);
331729b00e60SDarrick J. Wong int ret = 0;
33181da177e4SLinus Torvalds
33191da177e4SLinus Torvalds /*
33201da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes;
33211da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and
33221da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked.
33231062af92SDarrick J. Wong * But if an O_TMPFILE file is linked into the tmpfs, the
33241062af92SDarrick J. Wong * first link must skip that, to get the accounting right.
33251da177e4SLinus Torvalds */
33261062af92SDarrick J. Wong if (inode->i_nlink) {
3327e809d5f0SChris Down ret = shmem_reserve_inode(inode->i_sb, NULL);
33285b04c689SPavel Emelyanov if (ret)
33295b04c689SPavel Emelyanov goto out;
33301062af92SDarrick J. Wong }
33311da177e4SLinus Torvalds
3332a2e45955SChuck Lever ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3333a2e45955SChuck Lever if (ret) {
3334a2e45955SChuck Lever if (inode->i_nlink)
33352daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, 0);
3336a2e45955SChuck Lever goto out;
3337a2e45955SChuck Lever }
3338a2e45955SChuck Lever
33391da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE;
334065287334SJeff Layton dir->i_mtime = inode_set_ctime_to_ts(dir,
334165287334SJeff Layton inode_set_ctime_current(inode));
334236f05cabSJeff Layton inode_inc_iversion(dir);
3343d8c76e6fSDave Hansen inc_nlink(inode);
33447de9c6eeSAl Viro ihold(inode); /* New dentry reference */
33451da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */
33461da177e4SLinus Torvalds d_instantiate(dentry, inode);
33475b04c689SPavel Emelyanov out:
33485b04c689SPavel Emelyanov return ret;
33491da177e4SLinus Torvalds }
33501da177e4SLinus Torvalds
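/*
 * Usage sketch (userspace, illustrative; not part of this file),
 * assuming /tmp is a tmpfs mount: the O_TMPFILE special case above is
 * what keeps the accounting right when an unnamed file gains its
 * first link. The /proc/self/fd indirection is the documented
 * unprivileged way to link an O_TMPFILE file into the namespace.
 *
 *	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);
 *	char path[64];
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/now-visible",
 *	       AT_SYMLINK_FOLLOW);
 */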
33511da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
33521da177e4SLinus Torvalds {
335375c3cfa8SDavid Howells struct inode *inode = d_inode(dentry);
33541da177e4SLinus Torvalds
33555b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
33562daf18a7SHugh Dickins shmem_free_inode(inode->i_sb, 0);
33571da177e4SLinus Torvalds
3358a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
33591da177e4SLinus Torvalds
33601da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE;
336165287334SJeff Layton dir->i_mtime = inode_set_ctime_to_ts(dir,
336265287334SJeff Layton inode_set_ctime_current(inode));
336336f05cabSJeff Layton inode_inc_iversion(dir);
33649a53c3a7SDave Hansen drop_nlink(inode);
33651da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */
33661da177e4SLinus Torvalds return 0;
33671da177e4SLinus Torvalds }
33681da177e4SLinus Torvalds
33691da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
33701da177e4SLinus Torvalds {
33711da177e4SLinus Torvalds if (!simple_empty(dentry))
33721da177e4SLinus Torvalds return -ENOTEMPTY;
33731da177e4SLinus Torvalds
337475c3cfa8SDavid Howells drop_nlink(d_inode(dentry));
33759a53c3a7SDave Hansen drop_nlink(dir);
33761da177e4SLinus Torvalds return shmem_unlink(dir, dentry);
33771da177e4SLinus Torvalds }
33781da177e4SLinus Torvalds
3379e18275aeSChristian Brauner static int shmem_whiteout(struct mnt_idmap *idmap,
3380549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry)
338146fdb794SMiklos Szeredi {
338246fdb794SMiklos Szeredi struct dentry *whiteout;
338346fdb794SMiklos Szeredi int error;
338446fdb794SMiklos Szeredi
338546fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
338646fdb794SMiklos Szeredi if (!whiteout)
338746fdb794SMiklos Szeredi return -ENOMEM;
338846fdb794SMiklos Szeredi
33897a80e5b8SGiuseppe Scrivano error = shmem_mknod(idmap, old_dir, whiteout,
339046fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
339146fdb794SMiklos Szeredi dput(whiteout);
339246fdb794SMiklos Szeredi if (error)
339346fdb794SMiklos Szeredi return error;
339446fdb794SMiklos Szeredi
339546fdb794SMiklos Szeredi /*
339646fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in
339746fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
339846fdb794SMiklos Szeredi *
339946fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point,
340046fdb794SMiklos Szeredi * not sure which one, but that isn't even important.
340146fdb794SMiklos Szeredi */
340246fdb794SMiklos Szeredi d_rehash(whiteout);
340346fdb794SMiklos Szeredi return 0;
340446fdb794SMiklos Szeredi }
340546fdb794SMiklos Szeredi
34061da177e4SLinus Torvalds /*
34071da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename;
34081da177e4SLinus Torvalds * we just have to decrement the usage count for the target if
34091da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it
34101da177e4SLinus Torvalds * gets overwritten.
34111da177e4SLinus Torvalds */
3412e18275aeSChristian Brauner static int shmem_rename2(struct mnt_idmap *idmap,
3413549c7297SChristian Brauner struct inode *old_dir, struct dentry *old_dentry,
3414549c7297SChristian Brauner struct inode *new_dir, struct dentry *new_dentry,
3415549c7297SChristian Brauner unsigned int flags)
34161da177e4SLinus Torvalds {
341775c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry);
34181da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode);
3419a2e45955SChuck Lever int error;
34201da177e4SLinus Torvalds
342146fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
34223b69ff51SMiklos Szeredi return -EINVAL;
34233b69ff51SMiklos Szeredi
342437456771SMiklos Szeredi if (flags & RENAME_EXCHANGE)
3425a2e45955SChuck Lever return simple_offset_rename_exchange(old_dir, old_dentry,
3426a2e45955SChuck Lever new_dir, new_dentry);
342737456771SMiklos Szeredi
34281da177e4SLinus Torvalds if (!simple_empty(new_dentry))
34291da177e4SLinus Torvalds return -ENOTEMPTY;
34301da177e4SLinus Torvalds
343146fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) {
34327a80e5b8SGiuseppe Scrivano error = shmem_whiteout(idmap, old_dir, old_dentry);
343346fdb794SMiklos Szeredi if (error)
343446fdb794SMiklos Szeredi return error;
343546fdb794SMiklos Szeredi }
343646fdb794SMiklos Szeredi
3437a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
3438a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
3439a2e45955SChuck Lever if (error)
3440a2e45955SChuck Lever return error;
3441a2e45955SChuck Lever
344275c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) {
34431da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry);
3444b928095bSMiklos Szeredi if (they_are_dirs) {
344575c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry));
34469a53c3a7SDave Hansen drop_nlink(old_dir);
3447b928095bSMiklos Szeredi }
34481da177e4SLinus Torvalds } else if (they_are_dirs) {
34499a53c3a7SDave Hansen drop_nlink(old_dir);
3450d8c76e6fSDave Hansen inc_nlink(new_dir);
34511da177e4SLinus Torvalds }
34521da177e4SLinus Torvalds
34531da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE;
34541da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE;
3455944d0d9dSJeff Layton simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
345636f05cabSJeff Layton inode_inc_iversion(old_dir);
345736f05cabSJeff Layton inode_inc_iversion(new_dir);
34581da177e4SLinus Torvalds return 0;
34591da177e4SLinus Torvalds }
34601da177e4SLinus Torvalds
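/*
 * Usage sketch (userspace, illustrative; not part of this file): all
 * three rename flags accepted above are reachable via renameat2(2).
 * RENAME_WHITEOUT leaves behind the char-device whiteout created by
 * shmem_whiteout(), which overlayfs relies on for tmpfs upper layers.
 *
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *	renameat2(AT_FDCWD, "b", AT_FDCWD, "c", RENAME_WHITEOUT);
 */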
34617a77db95SChristian Brauner static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3462549c7297SChristian Brauner struct dentry *dentry, const char *symname)
34631da177e4SLinus Torvalds {
34641da177e4SLinus Torvalds int error;
34651da177e4SLinus Torvalds int len;
34661da177e4SLinus Torvalds struct inode *inode;
34677ad0414bSMatthew Wilcox (Oracle) struct folio *folio;
34681da177e4SLinus Torvalds
34691da177e4SLinus Torvalds len = strlen(symname) + 1;
347009cbfeafSKirill A. Shutemov if (len > PAGE_SIZE)
34711da177e4SLinus Torvalds return -ENAMETOOLONG;
34721da177e4SLinus Torvalds
34737a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
34740825a6f9SJoe Perches VM_NORESERVE);
347571480663SCarlos Maiolino
347671480663SCarlos Maiolino if (IS_ERR(inode))
347771480663SCarlos Maiolino return PTR_ERR(inode);
34781da177e4SLinus Torvalds
34799d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name,
34806d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL);
348123a31d87SChuck Lever if (error && error != -EOPNOTSUPP)
348223a31d87SChuck Lever goto out_iput;
3483570bc1c2SStephen Smalley
3484a2e45955SChuck Lever error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3485a2e45955SChuck Lever if (error)
3486a2e45955SChuck Lever goto out_iput;
34871da177e4SLinus Torvalds
34881da177e4SLinus Torvalds inode->i_size = len-1;
348969f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) {
34903ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL);
34913ed47db3SAl Viro if (!inode->i_link) {
349223a31d87SChuck Lever error = -ENOMEM;
3493a2e45955SChuck Lever goto out_remove_offset;
349469f07ec9SHugh Dickins }
349569f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations;
34961da177e4SLinus Torvalds } else {
3497e8ecde25SAl Viro inode_nohighmem(inode);
34987ad0414bSMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
349923a31d87SChuck Lever if (error)
3500a2e45955SChuck Lever goto out_remove_offset;
350114fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops;
35021da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations;
35037ad0414bSMatthew Wilcox (Oracle) memcpy(folio_address(folio), symname, len);
35047ad0414bSMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
35057ad0414bSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
35067ad0414bSMatthew Wilcox (Oracle) folio_unlock(folio);
35077ad0414bSMatthew Wilcox (Oracle) folio_put(folio);
35081da177e4SLinus Torvalds }
35091da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE;
351065287334SJeff Layton dir->i_mtime = inode_set_ctime_current(dir);
351136f05cabSJeff Layton inode_inc_iversion(dir);
35121da177e4SLinus Torvalds d_instantiate(dentry, inode);
35131da177e4SLinus Torvalds dget(dentry);
35141da177e4SLinus Torvalds return 0;
3515a2e45955SChuck Lever
3516a2e45955SChuck Lever out_remove_offset:
3517a2e45955SChuck Lever simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
351823a31d87SChuck Lever out_iput:
351923a31d87SChuck Lever iput(inode);
352023a31d87SChuck Lever return error;
35211da177e4SLinus Torvalds }
35221da177e4SLinus Torvalds
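/*
 * Usage sketch (userspace, illustrative; not part of this file): a
 * target of up to SHORT_SYMLINK_LEN bytes (including the NUL) is
 * kmemdup'ed straight into inode->i_link above; anything longer is
 * written into a pagecache folio, which is why only that path sets
 * up shmem_aops. Here very_long_target stands for any string longer
 * than SHORT_SYMLINK_LEN.
 *
 *	symlink("short", "/tmp/s1");
 *	symlink(very_long_target, "/tmp/s2");
 */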
3523fceef393SAl Viro static void shmem_put_link(void *arg)
3524fceef393SAl Viro {
3525e4b57722SMatthew Wilcox (Oracle) folio_mark_accessed(arg);
3526e4b57722SMatthew Wilcox (Oracle) folio_put(arg);
3527fceef393SAl Viro }
3528fceef393SAl Viro
35296b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3530fceef393SAl Viro struct inode *inode,
3531fceef393SAl Viro struct delayed_call *done)
35321da177e4SLinus Torvalds {
3533e4b57722SMatthew Wilcox (Oracle) struct folio *folio = NULL;
35346b255391SAl Viro int error;
3535e4b57722SMatthew Wilcox (Oracle)
35366a6c9904SAl Viro if (!dentry) {
3537e4b57722SMatthew Wilcox (Oracle) folio = filemap_get_folio(inode->i_mapping, 0);
353866dabbb6SChristoph Hellwig if (IS_ERR(folio))
35396b255391SAl Viro return ERR_PTR(-ECHILD);
35407459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0)) ||
3541e4b57722SMatthew Wilcox (Oracle) !folio_test_uptodate(folio)) {
3542e4b57722SMatthew Wilcox (Oracle) folio_put(folio);
35436a6c9904SAl Viro return ERR_PTR(-ECHILD);
35446a6c9904SAl Viro }
35456a6c9904SAl Viro } else {
3546e4b57722SMatthew Wilcox (Oracle) error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3547680baacbSAl Viro if (error)
3548680baacbSAl Viro return ERR_PTR(error);
3549e4b57722SMatthew Wilcox (Oracle) if (!folio)
3550a7605426SYang Shi return ERR_PTR(-ECHILD);
35517459c149SMatthew Wilcox (Oracle) if (PageHWPoison(folio_page(folio, 0))) {
3552e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio);
3553e4b57722SMatthew Wilcox (Oracle) folio_put(folio);
3554a7605426SYang Shi return ERR_PTR(-ECHILD);
3555a7605426SYang Shi }
3556e4b57722SMatthew Wilcox (Oracle) folio_unlock(folio);
35571da177e4SLinus Torvalds }
3558e4b57722SMatthew Wilcox (Oracle) set_delayed_call(done, shmem_put_link, folio);
3559e4b57722SMatthew Wilcox (Oracle) return folio_address(folio);
35601da177e4SLinus Torvalds }
35611da177e4SLinus Torvalds
3562b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3563e408e695STheodore Ts'o
3564e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3565e408e695STheodore Ts'o {
3566e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3567e408e695STheodore Ts'o
3568e408e695STheodore Ts'o fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3569e408e695STheodore Ts'o
3570e408e695STheodore Ts'o return 0;
3571e408e695STheodore Ts'o }
3572e408e695STheodore Ts'o
35738782a9aeSChristian Brauner static int shmem_fileattr_set(struct mnt_idmap *idmap,
3574e408e695STheodore Ts'o struct dentry *dentry, struct fileattr *fa)
3575e408e695STheodore Ts'o {
3576e408e695STheodore Ts'o struct inode *inode = d_inode(dentry);
3577e408e695STheodore Ts'o struct shmem_inode_info *info = SHMEM_I(inode);
3578e408e695STheodore Ts'o
3579e408e695STheodore Ts'o if (fileattr_has_fsx(fa))
3580e408e695STheodore Ts'o return -EOPNOTSUPP;
3581cb241339SHugh Dickins if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3582cb241339SHugh Dickins return -EOPNOTSUPP;
3583e408e695STheodore Ts'o
3584e408e695STheodore Ts'o info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3585e408e695STheodore Ts'o (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3586e408e695STheodore Ts'o
3587cb241339SHugh Dickins shmem_set_inode_flags(inode, info->fsflags);
358865287334SJeff Layton inode_set_ctime_current(inode);
358936f05cabSJeff Layton inode_inc_iversion(inode);
3590e408e695STheodore Ts'o return 0;
3591e408e695STheodore Ts'o }
3592e408e695STheodore Ts'o
3593b09e0fa4SEric Paris /*
3594b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr
3595b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs
3596b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at
3597b09e0fa4SEric Paris * filesystem level, though.
3598b09e0fa4SEric Paris */
3599b09e0fa4SEric Paris
36006d9d88d0SJarkko Sakkinen /*
36016d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs.
36026d9d88d0SJarkko Sakkinen */
36036d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
36046d9d88d0SJarkko Sakkinen const struct xattr *xattr_array,
36056d9d88d0SJarkko Sakkinen void *fs_info)
36066d9d88d0SJarkko Sakkinen {
36076d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode);
36082daf18a7SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
36096d9d88d0SJarkko Sakkinen const struct xattr *xattr;
361038f38657SAristeu Rozanski struct simple_xattr *new_xattr;
36112daf18a7SHugh Dickins size_t ispace = 0;
36126d9d88d0SJarkko Sakkinen size_t len;
36136d9d88d0SJarkko Sakkinen
36142daf18a7SHugh Dickins if (sbinfo->max_inodes) {
36152daf18a7SHugh Dickins for (xattr = xattr_array; xattr->name != NULL; xattr++) {
36162daf18a7SHugh Dickins ispace += simple_xattr_space(xattr->name,
36172daf18a7SHugh Dickins xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
36182daf18a7SHugh Dickins }
36192daf18a7SHugh Dickins if (ispace) {
36202daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock);
36212daf18a7SHugh Dickins if (sbinfo->free_ispace < ispace)
36222daf18a7SHugh Dickins ispace = 0;
36232daf18a7SHugh Dickins else
36242daf18a7SHugh Dickins sbinfo->free_ispace -= ispace;
36252daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock);
36262daf18a7SHugh Dickins if (!ispace)
36272daf18a7SHugh Dickins return -ENOSPC;
36282daf18a7SHugh Dickins }
36292daf18a7SHugh Dickins }
36302daf18a7SHugh Dickins
36316d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) {
363238f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
36336d9d88d0SJarkko Sakkinen if (!new_xattr)
36342daf18a7SHugh Dickins break;
36356d9d88d0SJarkko Sakkinen
36366d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1;
36376d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3638572a3d1eSHugh Dickins GFP_KERNEL_ACCOUNT);
36396d9d88d0SJarkko Sakkinen if (!new_xattr->name) {
36403bef735aSChengguang Xu kvfree(new_xattr);
36412daf18a7SHugh Dickins break;
36426d9d88d0SJarkko Sakkinen }
36436d9d88d0SJarkko Sakkinen
36446d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
36456d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN);
36466d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
36476d9d88d0SJarkko Sakkinen xattr->name, len);
36486d9d88d0SJarkko Sakkinen
36493b4c7bc0SChristian Brauner simple_xattr_add(&info->xattrs, new_xattr);
36506d9d88d0SJarkko Sakkinen }
36516d9d88d0SJarkko Sakkinen
36522daf18a7SHugh Dickins if (xattr->name != NULL) {
36532daf18a7SHugh Dickins if (ispace) {
36542daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock);
36552daf18a7SHugh Dickins sbinfo->free_ispace += ispace;
36562daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock);
36572daf18a7SHugh Dickins }
36582daf18a7SHugh Dickins simple_xattrs_free(&info->xattrs, NULL);
36592daf18a7SHugh Dickins return -ENOMEM;
36602daf18a7SHugh Dickins }
36612daf18a7SHugh Dickins
36626d9d88d0SJarkko Sakkinen return 0;
36636d9d88d0SJarkko Sakkinen }
36646d9d88d0SJarkko Sakkinen
3665aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3666b296821aSAl Viro struct dentry *unused, struct inode *inode,
3667b296821aSAl Viro const char *name, void *buffer, size_t size)
3668aa7c5241SAndreas Gruenbacher {
3669b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode);
3670aa7c5241SAndreas Gruenbacher
3671aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name);
3672aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size);
3673aa7c5241SAndreas Gruenbacher }
3674aa7c5241SAndreas Gruenbacher
3675aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
367639f60c1cSChristian Brauner struct mnt_idmap *idmap,
367759301226SAl Viro struct dentry *unused, struct inode *inode,
367859301226SAl Viro const char *name, const void *value,
367959301226SAl Viro size_t size, int flags)
3680aa7c5241SAndreas Gruenbacher {
368159301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode);
36822daf18a7SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
36835de75970SHugh Dickins struct simple_xattr *old_xattr;
36842daf18a7SHugh Dickins size_t ispace = 0;
3685aa7c5241SAndreas Gruenbacher
3686aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name);
36872daf18a7SHugh Dickins if (value && sbinfo->max_inodes) {
36882daf18a7SHugh Dickins ispace = simple_xattr_space(name, size);
36892daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock);
36902daf18a7SHugh Dickins if (sbinfo->free_ispace < ispace)
36912daf18a7SHugh Dickins ispace = 0;
36922daf18a7SHugh Dickins else
36932daf18a7SHugh Dickins sbinfo->free_ispace -= ispace;
36942daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock);
36952daf18a7SHugh Dickins if (!ispace)
36962daf18a7SHugh Dickins return -ENOSPC;
36972daf18a7SHugh Dickins }
36982daf18a7SHugh Dickins
36995de75970SHugh Dickins old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
37005de75970SHugh Dickins if (!IS_ERR(old_xattr)) {
37012daf18a7SHugh Dickins ispace = 0;
37022daf18a7SHugh Dickins if (old_xattr && sbinfo->max_inodes)
37032daf18a7SHugh Dickins ispace = simple_xattr_space(old_xattr->name,
37042daf18a7SHugh Dickins old_xattr->size);
37055de75970SHugh Dickins simple_xattr_free(old_xattr);
37065de75970SHugh Dickins old_xattr = NULL;
370765287334SJeff Layton inode_set_ctime_current(inode);
370836f05cabSJeff Layton inode_inc_iversion(inode);
370936f05cabSJeff Layton }
37102daf18a7SHugh Dickins if (ispace) {
37112daf18a7SHugh Dickins raw_spin_lock(&sbinfo->stat_lock);
37122daf18a7SHugh Dickins sbinfo->free_ispace += ispace;
37132daf18a7SHugh Dickins raw_spin_unlock(&sbinfo->stat_lock);
37142daf18a7SHugh Dickins }
37155de75970SHugh Dickins return PTR_ERR(old_xattr);
3716aa7c5241SAndreas Gruenbacher }
3717aa7c5241SAndreas Gruenbacher
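/*
 * Usage sketch (userspace, illustrative; not part of this file): the
 * handler tables below route security.*, trusted.* and user.* names
 * into the get/set helpers above; when nr_inodes is limited, each
 * stored value is charged against the mount's free_ispace.
 *
 *	char buf[16];
 *	setxattr("/tmp/f", "user.origin", "build-42", 8, 0);
 *	getxattr("/tmp/f", "user.origin", buf, sizeof(buf));
 */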
3718aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3719aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX,
3720aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get,
3721aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set,
3722aa7c5241SAndreas Gruenbacher };
3723aa7c5241SAndreas Gruenbacher
3724aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3725aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX,
3726aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get,
3727aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set,
3728aa7c5241SAndreas Gruenbacher };
3729aa7c5241SAndreas Gruenbacher
37302daf18a7SHugh Dickins static const struct xattr_handler shmem_user_xattr_handler = {
37312daf18a7SHugh Dickins .prefix = XATTR_USER_PREFIX,
37322daf18a7SHugh Dickins .get = shmem_xattr_handler_get,
37332daf18a7SHugh Dickins .set = shmem_xattr_handler_set,
37342daf18a7SHugh Dickins };
37352daf18a7SHugh Dickins
3736b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3737aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler,
3738aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler,
37392daf18a7SHugh Dickins &shmem_user_xattr_handler,
3740b09e0fa4SEric Paris NULL
3741b09e0fa4SEric Paris };
3742b09e0fa4SEric Paris
3743b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3744b09e0fa4SEric Paris {
374575c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3746786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3747b09e0fa4SEric Paris }
3748b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3749b09e0fa4SEric Paris
375069f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
3751f7cd16a5SXavier Roche .getattr = shmem_getattr,
3752e09764cfSCarlos Maiolino .setattr = shmem_setattr,
37536b255391SAl Viro .get_link = simple_get_link,
3754b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3755b09e0fa4SEric Paris .listxattr = shmem_listxattr,
3756b09e0fa4SEric Paris #endif
37571da177e4SLinus Torvalds };
37581da177e4SLinus Torvalds
375992e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
3760f7cd16a5SXavier Roche .getattr = shmem_getattr,
3761e09764cfSCarlos Maiolino .setattr = shmem_setattr,
37626b255391SAl Viro .get_link = shmem_get_link,
3763b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3764b09e0fa4SEric Paris .listxattr = shmem_listxattr,
376539f0247dSAndreas Gruenbacher #endif
3766b09e0fa4SEric Paris };
376739f0247dSAndreas Gruenbacher
376891828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
376991828a40SDavid M. Grimes {
377091828a40SDavid M. Grimes return ERR_PTR(-ESTALE);
377191828a40SDavid M. Grimes }
377291828a40SDavid M. Grimes
377391828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
377491828a40SDavid M. Grimes {
377591828a40SDavid M. Grimes __u32 *fh = vfh;
377691828a40SDavid M. Grimes __u64 inum = fh[2];
377791828a40SDavid M. Grimes inum = (inum << 32) | fh[1];
377891828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation;
377991828a40SDavid M. Grimes }
378091828a40SDavid M. Grimes
378112ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
378212ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
378312ba780dSAmir Goldstein {
378412ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode);
378512ba780dSAmir Goldstein
378612ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode);
378712ba780dSAmir Goldstein }
378812ba780dSAmir Goldstein
378912ba780dSAmir Goldstein
3790480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3791480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type)
379291828a40SDavid M. Grimes {
379391828a40SDavid M. Grimes struct inode *inode;
3794480b116cSChristoph Hellwig struct dentry *dentry = NULL;
379535c2a7f4SHugh Dickins u64 inum;
379691828a40SDavid M. Grimes
3797480b116cSChristoph Hellwig if (fh_len < 3)
3798480b116cSChristoph Hellwig return NULL;
3799480b116cSChristoph Hellwig
380035c2a7f4SHugh Dickins inum = fid->raw[2];
380135c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1];
380235c2a7f4SHugh Dickins
3803480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3804480b116cSChristoph Hellwig shmem_match, fid->raw);
380591828a40SDavid M. Grimes if (inode) {
380612ba780dSAmir Goldstein dentry = shmem_find_alias(inode);
380791828a40SDavid M. Grimes iput(inode);
380891828a40SDavid M. Grimes }
380991828a40SDavid M. Grimes
3810480b116cSChristoph Hellwig return dentry;
381191828a40SDavid M. Grimes }
381291828a40SDavid M. Grimes
3813b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3814b0b0382bSAl Viro struct inode *parent)
381591828a40SDavid M. Grimes {
38165fe0c237SAneesh Kumar K.V if (*len < 3) {
38175fe0c237SAneesh Kumar K.V *len = 3;
381894e07a75SNamjae Jeon return FILEID_INVALID;
38195fe0c237SAneesh Kumar K.V }
382091828a40SDavid M. Grimes
38211d3382cbSAl Viro if (inode_unhashed(inode)) {
382291828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent,
382391828a40SDavid M. Grimes * so as we hash inodes here rather than at creation
382491828a40SDavid M. Grimes * time, we need a lock to ensure we only try
382591828a40SDavid M. Grimes * to do it once
382691828a40SDavid M. Grimes */
382791828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock);
382891828a40SDavid M. Grimes spin_lock(&lock);
38291d3382cbSAl Viro if (inode_unhashed(inode))
383091828a40SDavid M. Grimes __insert_inode_hash(inode,
383191828a40SDavid M. Grimes inode->i_ino + inode->i_generation);
383291828a40SDavid M. Grimes spin_unlock(&lock);
383391828a40SDavid M. Grimes }
383491828a40SDavid M. Grimes
383591828a40SDavid M. Grimes fh[0] = inode->i_generation;
383691828a40SDavid M. Grimes fh[1] = inode->i_ino;
383791828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32;
383891828a40SDavid M. Grimes
383991828a40SDavid M. Grimes *len = 3;
384091828a40SDavid M. Grimes return 1;
384191828a40SDavid M. Grimes }
384291828a40SDavid M. Grimes
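/*
 * Usage sketch (userspace, illustrative; not part of this file): the
 * 3-word handle built above (generation plus a 64-bit inum) can be
 * obtained with name_to_handle_at(2); reopening it later with
 * open_by_handle_at(2) needs CAP_DAC_READ_SEARCH.
 *
 *	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
 *	int mount_id;
 *	fh->handle_bytes = MAX_HANDLE_SZ;
 *	name_to_handle_at(AT_FDCWD, "/tmp/f", fh, &mount_id, 0);
 */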
384339655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
384491828a40SDavid M. Grimes .get_parent = shmem_get_parent,
384591828a40SDavid M. Grimes .encode_fh = shmem_encode_fh,
3846480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry,
384791828a40SDavid M. Grimes };
384891828a40SDavid M. Grimes
3849626c3920SAl Viro enum shmem_param {
3850626c3920SAl Viro Opt_gid,
3851626c3920SAl Viro Opt_huge,
3852626c3920SAl Viro Opt_mode,
3853626c3920SAl Viro Opt_mpol,
3854626c3920SAl Viro Opt_nr_blocks,
3855626c3920SAl Viro Opt_nr_inodes,
3856626c3920SAl Viro Opt_size,
3857626c3920SAl Viro Opt_uid,
3858ea3271f7SChris Down Opt_inode32,
3859ea3271f7SChris Down Opt_inode64,
38602c6efe9cSLuis Chamberlain Opt_noswap,
3861e09764cfSCarlos Maiolino Opt_quota,
3862e09764cfSCarlos Maiolino Opt_usrquota,
3863e09764cfSCarlos Maiolino Opt_grpquota,
3864de4c0e7cSLukas Czerner Opt_usrquota_block_hardlimit,
3865de4c0e7cSLukas Czerner Opt_usrquota_inode_hardlimit,
3866de4c0e7cSLukas Czerner Opt_grpquota_block_hardlimit,
3867de4c0e7cSLukas Czerner Opt_grpquota_inode_hardlimit,
3868626c3920SAl Viro };
38691da177e4SLinus Torvalds
38705eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
38712710c957SAl Viro {"never", SHMEM_HUGE_NEVER },
38722710c957SAl Viro {"always", SHMEM_HUGE_ALWAYS },
38732710c957SAl Viro {"within_size", SHMEM_HUGE_WITHIN_SIZE },
38742710c957SAl Viro {"advise", SHMEM_HUGE_ADVISE },
38752710c957SAl Viro {}
38762710c957SAl Viro };
38772710c957SAl Viro
3878d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3879626c3920SAl Viro fsparam_u32 ("gid", Opt_gid),
38802710c957SAl Viro fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3881626c3920SAl Viro fsparam_u32oct("mode", Opt_mode),
3882626c3920SAl Viro fsparam_string("mpol", Opt_mpol),
3883626c3920SAl Viro fsparam_string("nr_blocks", Opt_nr_blocks),
3884626c3920SAl Viro fsparam_string("nr_inodes", Opt_nr_inodes),
3885626c3920SAl Viro fsparam_string("size", Opt_size),
3886626c3920SAl Viro fsparam_u32 ("uid", Opt_uid),
3887ea3271f7SChris Down fsparam_flag ("inode32", Opt_inode32),
3888ea3271f7SChris Down fsparam_flag ("inode64", Opt_inode64),
38892c6efe9cSLuis Chamberlain fsparam_flag ("noswap", Opt_noswap),
3890e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
3891e09764cfSCarlos Maiolino fsparam_flag ("quota", Opt_quota),
3892e09764cfSCarlos Maiolino fsparam_flag ("usrquota", Opt_usrquota),
3893e09764cfSCarlos Maiolino fsparam_flag ("grpquota", Opt_grpquota),
3894de4c0e7cSLukas Czerner fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
3895de4c0e7cSLukas Czerner fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
3896de4c0e7cSLukas Czerner fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
3897de4c0e7cSLukas Czerner fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
3898e09764cfSCarlos Maiolino #endif
3899626c3920SAl Viro {}
3900626c3920SAl Viro };
3901626c3920SAl Viro
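/*
 * Usage sketch (userspace, illustrative; not part of this file): a
 * classic mount(2) data string is split on commas by
 * shmem_parse_options() below and fed through the table above one
 * parameter at a time.
 *
 *	mount("tmpfs", "/mnt", "tmpfs", 0,
 *	      "size=1g,nr_inodes=10k,mode=700,huge=within_size");
 */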
3902f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3903626c3920SAl Viro {
3904f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private;
3905626c3920SAl Viro struct fs_parse_result result;
3906e04dc423SAl Viro unsigned long long size;
3907626c3920SAl Viro char *rest;
3908626c3920SAl Viro int opt;
39090200679fSChristian Brauner kuid_t kuid;
39100200679fSChristian Brauner kgid_t kgid;
3911626c3920SAl Viro
3912d7167b14SAl Viro opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3913f3235626SDavid Howells if (opt < 0)
3914626c3920SAl Viro return opt;
3915626c3920SAl Viro
3916626c3920SAl Viro switch (opt) {
3917626c3920SAl Viro case Opt_size:
3918626c3920SAl Viro size = memparse(param->string, &rest);
3919e04dc423SAl Viro if (*rest == '%') {
3920e04dc423SAl Viro size <<= PAGE_SHIFT;
3921e04dc423SAl Viro size *= totalram_pages();
3922e04dc423SAl Viro do_div(size, 100);
3923e04dc423SAl Viro rest++;
3924e04dc423SAl Viro }
3925e04dc423SAl Viro if (*rest)
3926626c3920SAl Viro goto bad_value;
3927e04dc423SAl Viro ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3928e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS;
3929626c3920SAl Viro break;
3930626c3920SAl Viro case Opt_nr_blocks:
3931626c3920SAl Viro ctx->blocks = memparse(param->string, &rest);
3932e07c469eSHugh Dickins if (*rest || ctx->blocks > LONG_MAX)
3933626c3920SAl Viro goto bad_value;
3934e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS;
3935626c3920SAl Viro break;
3936626c3920SAl Viro case Opt_nr_inodes:
3937626c3920SAl Viro ctx->inodes = memparse(param->string, &rest);
3938e07c469eSHugh Dickins if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
3939626c3920SAl Viro goto bad_value;
3940e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_INODES;
3941626c3920SAl Viro break;
3942626c3920SAl Viro case Opt_mode:
3943626c3920SAl Viro ctx->mode = result.uint_32 & 07777;
3944626c3920SAl Viro break;
3945626c3920SAl Viro case Opt_uid:
39460200679fSChristian Brauner kuid = make_kuid(current_user_ns(), result.uint_32);
39470200679fSChristian Brauner if (!uid_valid(kuid))
3948626c3920SAl Viro goto bad_value;
39490200679fSChristian Brauner
39500200679fSChristian Brauner /*
39510200679fSChristian Brauner * The requested uid must be representable in the
39520200679fSChristian Brauner * filesystem's idmapping.
39530200679fSChristian Brauner */
39540200679fSChristian Brauner if (!kuid_has_mapping(fc->user_ns, kuid))
39550200679fSChristian Brauner goto bad_value;
39560200679fSChristian Brauner
39570200679fSChristian Brauner ctx->uid = kuid;
3958626c3920SAl Viro break;
3959626c3920SAl Viro case Opt_gid:
39600200679fSChristian Brauner kgid = make_kgid(current_user_ns(), result.uint_32);
39610200679fSChristian Brauner if (!gid_valid(kgid))
3962626c3920SAl Viro goto bad_value;
39630200679fSChristian Brauner
39640200679fSChristian Brauner /*
39650200679fSChristian Brauner * The requested gid must be representable in the
39660200679fSChristian Brauner * filesystem's idmapping.
39670200679fSChristian Brauner */
39680200679fSChristian Brauner if (!kgid_has_mapping(fc->user_ns, kgid))
39690200679fSChristian Brauner goto bad_value;
39700200679fSChristian Brauner
39710200679fSChristian Brauner ctx->gid = kgid;
3972626c3920SAl Viro break;
3973626c3920SAl Viro case Opt_huge:
3974626c3920SAl Viro ctx->huge = result.uint_32;
3975626c3920SAl Viro if (ctx->huge != SHMEM_HUGE_NEVER &&
3976396bcc52SMatthew Wilcox (Oracle) !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3977626c3920SAl Viro has_transparent_hugepage()))
3978626c3920SAl Viro goto unsupported_parameter;
3979e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_HUGE;
3980626c3920SAl Viro break;
3981626c3920SAl Viro case Opt_mpol:
3982626c3920SAl Viro if (IS_ENABLED(CONFIG_NUMA)) {
3983e04dc423SAl Viro mpol_put(ctx->mpol);
3984e04dc423SAl Viro ctx->mpol = NULL;
3985626c3920SAl Viro if (mpol_parse_str(param->string, &ctx->mpol))
3986626c3920SAl Viro goto bad_value;
3987626c3920SAl Viro break;
3988626c3920SAl Viro }
3989626c3920SAl Viro goto unsupported_parameter;
3990ea3271f7SChris Down case Opt_inode32:
3991ea3271f7SChris Down ctx->full_inums = false;
3992ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS;
3993ea3271f7SChris Down break;
3994ea3271f7SChris Down case Opt_inode64:
3995ea3271f7SChris Down if (sizeof(ino_t) < 8) {
3996ea3271f7SChris Down return invalfc(fc,
3997ea3271f7SChris Down "Cannot use inode64 with <64bit inums in kernel\n");
3998ea3271f7SChris Down }
3999ea3271f7SChris Down ctx->full_inums = true;
4000ea3271f7SChris Down ctx->seen |= SHMEM_SEEN_INUMS;
4001ea3271f7SChris Down break;
40022c6efe9cSLuis Chamberlain case Opt_noswap:
400301106e14SChristian Brauner if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
400401106e14SChristian Brauner return invalfc(fc,
400501106e14SChristian Brauner "Turning off swap in unprivileged tmpfs mounts unsupported");
400601106e14SChristian Brauner }
40072c6efe9cSLuis Chamberlain ctx->noswap = true;
40082c6efe9cSLuis Chamberlain ctx->seen |= SHMEM_SEEN_NOSWAP;
40092c6efe9cSLuis Chamberlain break;
4010e09764cfSCarlos Maiolino case Opt_quota:
4011e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns)
4012e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4013e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA;
4014e09764cfSCarlos Maiolino ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4015e09764cfSCarlos Maiolino break;
4016e09764cfSCarlos Maiolino case Opt_usrquota:
4017e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns)
4018e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4019e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA;
4020e09764cfSCarlos Maiolino ctx->quota_types |= QTYPE_MASK_USR;
4021e09764cfSCarlos Maiolino break;
4022e09764cfSCarlos Maiolino case Opt_grpquota:
4023e09764cfSCarlos Maiolino if (fc->user_ns != &init_user_ns)
4024e09764cfSCarlos Maiolino return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4025e09764cfSCarlos Maiolino ctx->seen |= SHMEM_SEEN_QUOTA;
4026e09764cfSCarlos Maiolino ctx->quota_types |= QTYPE_MASK_GRP;
4027e09764cfSCarlos Maiolino break;
4028de4c0e7cSLukas Czerner case Opt_usrquota_block_hardlimit:
4029de4c0e7cSLukas Czerner size = memparse(param->string, &rest);
4030de4c0e7cSLukas Czerner if (*rest || !size)
4031de4c0e7cSLukas Czerner goto bad_value;
4032de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4033de4c0e7cSLukas Czerner return invalfc(fc,
4034de4c0e7cSLukas Czerner "User quota block hardlimit too large.");
4035de4c0e7cSLukas Czerner ctx->qlimits.usrquota_bhardlimit = size;
4036de4c0e7cSLukas Czerner break;
4037de4c0e7cSLukas Czerner case Opt_grpquota_block_hardlimit:
4038de4c0e7cSLukas Czerner size = memparse(param->string, &rest);
4039de4c0e7cSLukas Czerner if (*rest || !size)
4040de4c0e7cSLukas Czerner goto bad_value;
4041de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4042de4c0e7cSLukas Czerner return invalfc(fc,
4043de4c0e7cSLukas Czerner "Group quota block hardlimit too large.");
4044de4c0e7cSLukas Czerner ctx->qlimits.grpquota_bhardlimit = size;
4045de4c0e7cSLukas Czerner break;
4046de4c0e7cSLukas Czerner case Opt_usrquota_inode_hardlimit:
4047de4c0e7cSLukas Czerner size = memparse(param->string, &rest);
4048de4c0e7cSLukas Czerner if (*rest || !size)
4049de4c0e7cSLukas Czerner goto bad_value;
4050de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4051de4c0e7cSLukas Czerner return invalfc(fc,
4052de4c0e7cSLukas Czerner "User quota inode hardlimit too large.");
4053de4c0e7cSLukas Czerner ctx->qlimits.usrquota_ihardlimit = size;
4054de4c0e7cSLukas Czerner break;
4055de4c0e7cSLukas Czerner case Opt_grpquota_inode_hardlimit:
4056de4c0e7cSLukas Czerner size = memparse(param->string, &rest);
4057de4c0e7cSLukas Czerner if (*rest || !size)
4058de4c0e7cSLukas Czerner goto bad_value;
4059de4c0e7cSLukas Czerner if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4060de4c0e7cSLukas Czerner return invalfc(fc,
4061de4c0e7cSLukas Czerner "Group quota inode hardlimit too large.");
4062de4c0e7cSLukas Czerner ctx->qlimits.grpquota_ihardlimit = size;
4063de4c0e7cSLukas Czerner break;
4064e04dc423SAl Viro }
4065e04dc423SAl Viro return 0;
4066e04dc423SAl Viro
4067626c3920SAl Viro unsupported_parameter:
4068f35aa2bcSAl Viro return invalfc(fc, "Unsupported parameter '%s'", param->key);
4069626c3920SAl Viro bad_value:
4070f35aa2bcSAl Viro return invalfc(fc, "Bad value for '%s'", param->key);
4071e04dc423SAl Viro }
4072e04dc423SAl Viro
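/*
 * Usage sketch (userspace, illustrative; not part of this file, and
 * assumes glibc 2.36+ wrappers for the new mount API syscalls): each
 * fsconfig(2) call arrives here as one fs_parameter, so "50%" is
 * resolved against totalram_pages() in the Opt_size case above.
 *
 *	int sfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
 *	fsconfig(sfd, FSCONFIG_SET_STRING, "size", "50%", 0);
 *	fsconfig(sfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = fsmount(sfd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */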
4073f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
4074e04dc423SAl Viro {
4075f3235626SDavid Howells char *options = data;
4076f3235626SDavid Howells
407733f37c64SAl Viro if (options) {
407833f37c64SAl Viro int err = security_sb_eat_lsm_opts(options, &fc->security);
407933f37c64SAl Viro if (err)
408033f37c64SAl Viro return err;
408133f37c64SAl Viro }
408233f37c64SAl Viro
4083b00dc3adSHugh Dickins while (options != NULL) {
4084626c3920SAl Viro char *this_char = options;
4085b00dc3adSHugh Dickins for (;;) {
4086b00dc3adSHugh Dickins /*
4087b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately,
4088b00dc3adSHugh Dickins * mount options form a comma-separated list,
4089b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas.
4090b00dc3adSHugh Dickins */
4091b00dc3adSHugh Dickins options = strchr(options, ',');
4092b00dc3adSHugh Dickins if (options == NULL)
4093b00dc3adSHugh Dickins break;
4094b00dc3adSHugh Dickins options++;
4095b00dc3adSHugh Dickins if (!isdigit(*options)) {
4096b00dc3adSHugh Dickins options[-1] = '\0';
4097b00dc3adSHugh Dickins break;
4098b00dc3adSHugh Dickins }
4099b00dc3adSHugh Dickins }
4100626c3920SAl Viro if (*this_char) {
4101626c3920SAl Viro char *value = strchr(this_char, '=');
4102f3235626SDavid Howells size_t len = 0;
4103626c3920SAl Viro int err;
4104626c3920SAl Viro
4105626c3920SAl Viro if (value) {
4106626c3920SAl Viro *value++ = '\0';
4107f3235626SDavid Howells len = strlen(value);
41081da177e4SLinus Torvalds }
4109f3235626SDavid Howells err = vfs_parse_fs_string(fc, this_char, value, len);
4110f3235626SDavid Howells if (err < 0)
4111f3235626SDavid Howells return err;
41121da177e4SLinus Torvalds }
4113626c3920SAl Viro }
41141da177e4SLinus Torvalds return 0;
41151da177e4SLinus Torvalds }
41161da177e4SLinus Torvalds
4117f3235626SDavid Howells /*
4118f3235626SDavid Howells * Reconfigure a shmem filesystem.
4119f3235626SDavid Howells */
4120f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
41211da177e4SLinus Torvalds {
4122f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private;
4123f3235626SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4124e07c469eSHugh Dickins unsigned long used_isp;
4125bf11b9a8SSebastian Andrzej Siewior struct mempolicy *mpol = NULL;
4126f3235626SDavid Howells const char *err;
41270edd73b3SHugh Dickins
4128bf11b9a8SSebastian Andrzej Siewior raw_spin_lock(&sbinfo->stat_lock);
4129e07c469eSHugh Dickins used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
41300c98c8e1SZhaoLong Wang
4131f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4132f3235626SDavid Howells if (!sbinfo->max_blocks) {
4133f3235626SDavid Howells err = "Cannot retroactively limit size";
41340edd73b3SHugh Dickins goto out;
41350b5071ddSAl Viro }
4136f3235626SDavid Howells if (percpu_counter_compare(&sbinfo->used_blocks,
4137f3235626SDavid Howells ctx->blocks) > 0) {
4138f3235626SDavid Howells err = "Too small a size for current use";
41390b5071ddSAl Viro goto out;
4140f3235626SDavid Howells }
4141f3235626SDavid Howells }
4142f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4143f3235626SDavid Howells if (!sbinfo->max_inodes) {
4144f3235626SDavid Howells err = "Cannot retroactively limit inodes";
41450b5071ddSAl Viro goto out;
41460b5071ddSAl Viro }
4147e07c469eSHugh Dickins if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4148f3235626SDavid Howells err = "Too few inodes for current use";
4149f3235626SDavid Howells goto out;
4150f3235626SDavid Howells }
4151f3235626SDavid Howells }
41520edd73b3SHugh Dickins
4153ea3271f7SChris Down if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4154ea3271f7SChris Down sbinfo->next_ino > UINT_MAX) {
4155ea3271f7SChris Down err = "Current inum too high to switch to 32-bit inums";
4156ea3271f7SChris Down goto out;
4157ea3271f7SChris Down }
41582c6efe9cSLuis Chamberlain if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
41592c6efe9cSLuis Chamberlain err = "Cannot disable swap on remount";
41602c6efe9cSLuis Chamberlain goto out;
41612c6efe9cSLuis Chamberlain }
41622c6efe9cSLuis Chamberlain if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
41632c6efe9cSLuis Chamberlain err = "Cannot enable swap on remount if it was disabled on first mount";
41642c6efe9cSLuis Chamberlain goto out;
41652c6efe9cSLuis Chamberlain }
4166ea3271f7SChris Down
4167e09764cfSCarlos Maiolino if (ctx->seen & SHMEM_SEEN_QUOTA &&
4168e09764cfSCarlos Maiolino !sb_any_quota_loaded(fc->root->d_sb)) {
4169e09764cfSCarlos Maiolino err = "Cannot enable quota on remount";
4170e09764cfSCarlos Maiolino goto out;
4171e09764cfSCarlos Maiolino }
4172e09764cfSCarlos Maiolino
4173de4c0e7cSLukas Czerner #ifdef CONFIG_TMPFS_QUOTA
4174de4c0e7cSLukas Czerner #define CHANGED_LIMIT(name) \
4175de4c0e7cSLukas Czerner (ctx->qlimits.name## hardlimit && \
4176de4c0e7cSLukas Czerner (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
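/*
 * Token-pasting sketch: CHANGED_LIMIT(usrquota_b) expands to
 *
 *	(ctx->qlimits.usrquota_bhardlimit &&
 *	 (ctx->qlimits.usrquota_bhardlimit != sbinfo->qlimits.usrquota_bhardlimit))
 *
 * i.e. "a limit was requested in this fs_context and it differs from
 * the value the superblock was originally mounted with".
 */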
4177de4c0e7cSLukas Czerner
4178de4c0e7cSLukas Czerner if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4179de4c0e7cSLukas Czerner CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4180de4c0e7cSLukas Czerner err = "Cannot change global quota limit on remount";
4181de4c0e7cSLukas Czerner goto out;
4182de4c0e7cSLukas Czerner }
4183de4c0e7cSLukas Czerner #endif /* CONFIG_TMPFS_QUOTA */
4184de4c0e7cSLukas Czerner
4185f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_HUGE)
4186f3235626SDavid Howells sbinfo->huge = ctx->huge;
4187ea3271f7SChris Down if (ctx->seen & SHMEM_SEEN_INUMS)
4188ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums;
4189f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_BLOCKS)
4190f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks;
4191f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_INODES) {
4192f3235626SDavid Howells sbinfo->max_inodes = ctx->inodes;
4193e07c469eSHugh Dickins sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
41940b5071ddSAl Viro }
419571fe804bSLee Schermerhorn
41965f00110fSGreg Thelen /*
41975f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified.
41985f00110fSGreg Thelen */
4199f3235626SDavid Howells if (ctx->mpol) {
4200bf11b9a8SSebastian Andrzej Siewior mpol = sbinfo->mpol;
4201f3235626SDavid Howells sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4202f3235626SDavid Howells ctx->mpol = NULL;
42035f00110fSGreg Thelen }
42042c6efe9cSLuis Chamberlain
42052c6efe9cSLuis Chamberlain if (ctx->noswap)
42062c6efe9cSLuis Chamberlain sbinfo->noswap = true;
42072c6efe9cSLuis Chamberlain
4208bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock);
4209bf11b9a8SSebastian Andrzej Siewior mpol_put(mpol);
4210f3235626SDavid Howells return 0;
42110edd73b3SHugh Dickins out:
4212bf11b9a8SSebastian Andrzej Siewior raw_spin_unlock(&sbinfo->stat_lock);
4213f35aa2bcSAl Viro return invalfc(fc, "%s", err);
42141da177e4SLinus Torvalds }
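/*
 * For example (illustrative), given the checks above:
 *
 *	mount -o remount,size=2g /dev/shm
 *
 * succeeds only if the instance was mounted with a size limit and no
 * more than 2G of blocks are currently in use, while
 *
 *	mount -o remount,noswap /dev/shm
 *
 * fails with "Cannot disable swap on remount" unless noswap was already
 * set at the first mount.
 */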
4215680d794bSakpm@linux-foundation.org
421634c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4217680d794bSakpm@linux-foundation.org {
421834c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4219283ebdeeSTu Jinjiang struct mempolicy *mpol;
4220680d794bSakpm@linux-foundation.org
4221680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks())
4222b91742d8SZhangPeng seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4223680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes())
4224680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
42250825a6f9SJoe Perches if (sbinfo->mode != (0777 | S_ISVTX))
422609208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode);
42278751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
42288751e039SEric W. Biederman seq_printf(seq, ",uid=%u",
42298751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid));
42308751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
42318751e039SEric W. Biederman seq_printf(seq, ",gid=%u",
42328751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid));
4233ea3271f7SChris Down
4234ea3271f7SChris Down /*
4235ea3271f7SChris Down * Showing inode{64,32} might be useful even if it's the system default,
4236ea3271f7SChris Down * since then people don't have to resort to checking both here and
4237ea3271f7SChris Down * /proc/config.gz to confirm 64-bit inums were successfully applied
4238ea3271f7SChris Down * (which may not even exist if IKCONFIG_PROC isn't enabled).
4239ea3271f7SChris Down *
4240ea3271f7SChris Down * We hide it when inode64 isn't the default and we are using 32-bit
4241ea3271f7SChris Down * inodes, since that probably just means the feature isn't even under
4242ea3271f7SChris Down * consideration.
4243ea3271f7SChris Down *
4244ea3271f7SChris Down * As such:
4245ea3271f7SChris Down *
4246ea3271f7SChris Down *                    +-----------------+-----------------+
4247ea3271f7SChris Down *                    | TMPFS_INODE64=y | TMPFS_INODE64=n |
4248ea3271f7SChris Down * +------------------+-----------------+-----------------+
4249ea3271f7SChris Down * | full_inums=true  | show            | show            |
4250ea3271f7SChris Down * | full_inums=false | show            | hide            |
4251ea3271f7SChris Down * +------------------+-----------------+-----------------+
4252ea3271f7SChris Down *
4253ea3271f7SChris Down */
4254ea3271f7SChris Down if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4255ea3271f7SChris Down seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4256396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
42575a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
42585a6e75f8SKirill A. Shutemov if (sbinfo->huge)
42595a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
42605a6e75f8SKirill A. Shutemov #endif
4261283ebdeeSTu Jinjiang mpol = shmem_get_sbmpol(sbinfo);
4262283ebdeeSTu Jinjiang shmem_show_mpol(seq, mpol);
4263283ebdeeSTu Jinjiang mpol_put(mpol);
42642c6efe9cSLuis Chamberlain if (sbinfo->noswap)
42652c6efe9cSLuis Chamberlain seq_printf(seq, ",noswap");
4266680d794bSakpm@linux-foundation.org return 0;
4267680d794bSakpm@linux-foundation.org }
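/*
 * Sample output (illustrative, assuming 4K pages): for a tmpfs mounted
 * with "size=1g,uid=1000,gid=1000,mode=700,inode64,noswap", the function
 * above would append roughly
 *
 *	,size=1048576k,mode=700,uid=1000,gid=1000,inode64,noswap
 *
 * to the /proc/mounts line, with unchanged defaults (nr_inodes, mpol,
 * huge) omitted.
 */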
42689183df25SDavid Herrmann
4269680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
42701da177e4SLinus Torvalds
42711da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
42721da177e4SLinus Torvalds {
4273602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4274602586a8SHugh Dickins
4275e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
4276e09764cfSCarlos Maiolino shmem_disable_quotas(sb);
4277e09764cfSCarlos Maiolino #endif
4278e809d5f0SChris Down free_percpu(sbinfo->ino_batch);
4279602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks);
428049cd0a5cSGreg Thelen mpol_put(sbinfo->mpol);
4281602586a8SHugh Dickins kfree(sbinfo);
42821da177e4SLinus Torvalds sb->s_fs_info = NULL;
42831da177e4SLinus Torvalds }
42841da177e4SLinus Torvalds
4285f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
42861da177e4SLinus Torvalds {
4287f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private;
42881da177e4SLinus Torvalds struct inode *inode;
42890edd73b3SHugh Dickins struct shmem_sb_info *sbinfo;
429071480663SCarlos Maiolino int error = -ENOMEM;
4291680d794bSakpm@linux-foundation.org
4292680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */
4293425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4294680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL);
4295680d794bSakpm@linux-foundation.org if (!sbinfo)
429671480663SCarlos Maiolino return error;
4297680d794bSakpm@linux-foundation.org
4298680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo;
42991da177e4SLinus Torvalds
43000edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
43011da177e4SLinus Torvalds /*
43021da177e4SLinus Torvalds * By default we only allow half of the physical RAM per
43031da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem;
43041da177e4SLinus Torvalds * but the internal instance is left unlimited.
43051da177e4SLinus Torvalds */
43061751e8a6SLinus Torvalds if (!(sb->s_flags & SB_KERNMOUNT)) {
4307f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4308f3235626SDavid Howells ctx->blocks = shmem_default_max_blocks();
4309f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_INODES))
4310f3235626SDavid Howells ctx->inodes = shmem_default_max_inodes();
4311ea3271f7SChris Down if (!(ctx->seen & SHMEM_SEEN_INUMS))
4312ea3271f7SChris Down ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
43132c6efe9cSLuis Chamberlain sbinfo->noswap = ctx->noswap;
4314ca4e0519SAl Viro } else {
43151751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER;
43161da177e4SLinus Torvalds }
431791828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops;
431836f05cabSJeff Layton sb->s_flags |= SB_NOSEC | SB_I_VERSION;
43190edd73b3SHugh Dickins #else
43201751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER;
43210edd73b3SHugh Dickins #endif
4322f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks;
4323e07c469eSHugh Dickins sbinfo->max_inodes = ctx->inodes;
4324e07c469eSHugh Dickins sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4325e809d5f0SChris Down if (sb->s_flags & SB_KERNMOUNT) {
4326e809d5f0SChris Down sbinfo->ino_batch = alloc_percpu(ino_t);
4327e809d5f0SChris Down if (!sbinfo->ino_batch)
4328e809d5f0SChris Down goto failed;
4329e809d5f0SChris Down }
4330f3235626SDavid Howells sbinfo->uid = ctx->uid;
4331f3235626SDavid Howells sbinfo->gid = ctx->gid;
4332ea3271f7SChris Down sbinfo->full_inums = ctx->full_inums;
4333f3235626SDavid Howells sbinfo->mode = ctx->mode;
4334f3235626SDavid Howells sbinfo->huge = ctx->huge;
4335f3235626SDavid Howells sbinfo->mpol = ctx->mpol;
4336f3235626SDavid Howells ctx->mpol = NULL;
43371da177e4SLinus Torvalds
4338bf11b9a8SSebastian Andrzej Siewior raw_spin_lock_init(&sbinfo->stat_lock);
4339908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4340602586a8SHugh Dickins goto failed;
4341779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock);
4342779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist);
43431da177e4SLinus Torvalds
4344285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE;
434509cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE;
434609cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT;
43471da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC;
43481da177e4SLinus Torvalds sb->s_op = &shmem_ops;
4349cfd95a9cSRobin H. Johnson sb->s_time_gran = 1;
4350b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
435139f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers;
4352b09e0fa4SEric Paris #endif
4353b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
43541751e8a6SLinus Torvalds sb->s_flags |= SB_POSIXACL;
435539f0247dSAndreas Gruenbacher #endif
43562b4db796SAmir Goldstein uuid_gen(&sb->s_uuid);
43570edd73b3SHugh Dickins
4358e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
4359e09764cfSCarlos Maiolino if (ctx->seen & SHMEM_SEEN_QUOTA) {
4360e09764cfSCarlos Maiolino sb->dq_op = &shmem_quota_operations;
4361e09764cfSCarlos Maiolino sb->s_qcop = &dquot_quotactl_sysfile_ops;
4362e09764cfSCarlos Maiolino sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4363e09764cfSCarlos Maiolino
4364de4c0e7cSLukas Czerner /* Copy the default limits from ctx into sbinfo */
4365de4c0e7cSLukas Czerner memcpy(&sbinfo->qlimits, &ctx->qlimits,
4366de4c0e7cSLukas Czerner sizeof(struct shmem_quota_limits));
4367de4c0e7cSLukas Czerner
4368e09764cfSCarlos Maiolino if (shmem_enable_quotas(sb, ctx->quota_types))
4369e09764cfSCarlos Maiolino goto failed;
4370e09764cfSCarlos Maiolino }
4371e09764cfSCarlos Maiolino #endif /* CONFIG_TMPFS_QUOTA */
4372e09764cfSCarlos Maiolino
43737a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
43747a80e5b8SGiuseppe Scrivano VM_NORESERVE);
437571480663SCarlos Maiolino if (IS_ERR(inode)) {
437671480663SCarlos Maiolino error = PTR_ERR(inode);
43771da177e4SLinus Torvalds goto failed;
437871480663SCarlos Maiolino }
4379680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid;
4380680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid;
4381318ceed0SAl Viro sb->s_root = d_make_root(inode);
4382318ceed0SAl Viro if (!sb->s_root)
438348fde701SAl Viro goto failed;
43841da177e4SLinus Torvalds return 0;
43851da177e4SLinus Torvalds
43861da177e4SLinus Torvalds failed:
43871da177e4SLinus Torvalds shmem_put_super(sb);
438871480663SCarlos Maiolino return error;
43891da177e4SLinus Torvalds }
43901da177e4SLinus Torvalds
4391f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
4392f3235626SDavid Howells {
4393f3235626SDavid Howells return get_tree_nodev(fc, shmem_fill_super);
4394f3235626SDavid Howells }
4395f3235626SDavid Howells
4396f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
4397f3235626SDavid Howells {
4398f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private;
4399f3235626SDavid Howells
4400f3235626SDavid Howells if (ctx) {
4401f3235626SDavid Howells mpol_put(ctx->mpol);
4402f3235626SDavid Howells kfree(ctx);
4403f3235626SDavid Howells }
4404f3235626SDavid Howells }
4405f3235626SDavid Howells
4406f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
4407f3235626SDavid Howells .free = shmem_free_fc,
4408f3235626SDavid Howells .get_tree = shmem_get_tree,
4409f3235626SDavid Howells #ifdef CONFIG_TMPFS
4410f3235626SDavid Howells .parse_monolithic = shmem_parse_options,
4411f3235626SDavid Howells .parse_param = shmem_parse_one,
4412f3235626SDavid Howells .reconfigure = shmem_reconfigure,
4413f3235626SDavid Howells #endif
4414f3235626SDavid Howells };
4415f3235626SDavid Howells
4416fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
44171da177e4SLinus Torvalds
44181da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
44191da177e4SLinus Torvalds {
442041ffe5d5SHugh Dickins struct shmem_inode_info *info;
4421fd60b288SMuchun Song info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
442241ffe5d5SHugh Dickins if (!info)
44231da177e4SLinus Torvalds return NULL;
442441ffe5d5SHugh Dickins return &info->vfs_inode;
44251da177e4SLinus Torvalds }
44261da177e4SLinus Torvalds
442774b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
4428fa0d7e3dSNick Piggin {
442984e710daSAl Viro if (S_ISLNK(inode->i_mode))
44303ed47db3SAl Viro kfree(inode->i_link);
4431fa0d7e3dSNick Piggin kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4432fa0d7e3dSNick Piggin }
4433fa0d7e3dSNick Piggin
44341da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
44351da177e4SLinus Torvalds {
443609208d15SAl Viro if (S_ISREG(inode->i_mode))
44371da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4438a2e45955SChuck Lever if (S_ISDIR(inode->i_mode))
4439a2e45955SChuck Lever simple_offset_destroy(shmem_get_offset_ctx(inode));
44401da177e4SLinus Torvalds }
44411da177e4SLinus Torvalds
444241ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
44431da177e4SLinus Torvalds {
444441ffe5d5SHugh Dickins struct shmem_inode_info *info = foo;
444541ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode);
44461da177e4SLinus Torvalds }
44471da177e4SLinus Torvalds
44489a8ec03eSweiping zhang static void shmem_init_inodecache(void)
44491da177e4SLinus Torvalds {
44501da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
44511da177e4SLinus Torvalds sizeof(struct shmem_inode_info),
44525d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
44531da177e4SLinus Torvalds }
44541da177e4SLinus Torvalds
445541ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
44561da177e4SLinus Torvalds {
44571a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep);
44581da177e4SLinus Torvalds }
44591da177e4SLinus Torvalds
4460a7605426SYang Shi /* Keep the page in page cache instead of truncating it */
4461a7605426SYang Shi static int shmem_error_remove_page(struct address_space *mapping,
4462a7605426SYang Shi struct page *page)
4463a7605426SYang Shi {
4464a7605426SYang Shi return 0;
4465a7605426SYang Shi }
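/*
 * Note: returning 0 keeps a hwpoisoned page in the page cache instead of
 * truncating it, so lookups such as shmem_read_mapping_page_gfp() below
 * can detect PageHWPoison and return -EIO rather than silently handing
 * out a fresh zeroed page for that offset.
 */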
4466a7605426SYang Shi
446730e6a51dSHui Su const struct address_space_operations shmem_aops = {
44681da177e4SLinus Torvalds .writepage = shmem_writepage,
446946de8b97SMatthew Wilcox (Oracle) .dirty_folio = noop_dirty_folio,
44701da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4471800d15a5SNick Piggin .write_begin = shmem_write_begin,
4472800d15a5SNick Piggin .write_end = shmem_write_end,
44731da177e4SLinus Torvalds #endif
44741c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
447554184650SMatthew Wilcox (Oracle) .migrate_folio = migrate_folio,
44761c93923cSAndrew Morton #endif
4477a7605426SYang Shi .error_remove_page = shmem_error_remove_page,
44781da177e4SLinus Torvalds };
447930e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
44801da177e4SLinus Torvalds
448115ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
44821da177e4SLinus Torvalds .mmap = shmem_mmap,
4483e88e0d36SHugh Dickins .open = shmem_file_open,
4484c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area,
44851da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4486220f2ac9SHugh Dickins .llseek = shmem_file_llseek,
44872ba5bbedSAl Viro .read_iter = shmem_file_read_iter,
4488e88e0d36SHugh Dickins .write_iter = shmem_file_write_iter,
44891b061d92SChristoph Hellwig .fsync = noop_fsync,
4490bd194b18SDavid Howells .splice_read = shmem_file_splice_read,
4491f6cb85d0SAl Viro .splice_write = iter_file_splice_write,
449283e4fa9cSHugh Dickins .fallocate = shmem_fallocate,
44931da177e4SLinus Torvalds #endif
44941da177e4SLinus Torvalds };
44951da177e4SLinus Torvalds
449692e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
449744a30220SYu Zhao .getattr = shmem_getattr,
449894c1e62dSHugh Dickins .setattr = shmem_setattr,
4499b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4500b09e0fa4SEric Paris .listxattr = shmem_listxattr,
4501feda821eSChristoph Hellwig .set_acl = simple_set_acl,
4502e408e695STheodore Ts'o .fileattr_get = shmem_fileattr_get,
4503e408e695STheodore Ts'o .fileattr_set = shmem_fileattr_set,
4504b09e0fa4SEric Paris #endif
45051da177e4SLinus Torvalds };
45061da177e4SLinus Torvalds
450792e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
45081da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4509f7cd16a5SXavier Roche .getattr = shmem_getattr,
45101da177e4SLinus Torvalds .create = shmem_create,
45111da177e4SLinus Torvalds .lookup = simple_lookup,
45121da177e4SLinus Torvalds .link = shmem_link,
45131da177e4SLinus Torvalds .unlink = shmem_unlink,
45141da177e4SLinus Torvalds .symlink = shmem_symlink,
45151da177e4SLinus Torvalds .mkdir = shmem_mkdir,
45161da177e4SLinus Torvalds .rmdir = shmem_rmdir,
45171da177e4SLinus Torvalds .mknod = shmem_mknod,
45182773bf00SMiklos Szeredi .rename = shmem_rename2,
451960545d0dSAl Viro .tmpfile = shmem_tmpfile,
4520a2e45955SChuck Lever .get_offset_ctx = shmem_get_offset_ctx,
45211da177e4SLinus Torvalds #endif
4522b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4523b09e0fa4SEric Paris .listxattr = shmem_listxattr,
4524e408e695STheodore Ts'o .fileattr_get = shmem_fileattr_get,
4525e408e695STheodore Ts'o .fileattr_set = shmem_fileattr_set,
4526b09e0fa4SEric Paris #endif
452739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
452894c1e62dSHugh Dickins .setattr = shmem_setattr,
4529feda821eSChristoph Hellwig .set_acl = simple_set_acl,
453039f0247dSAndreas Gruenbacher #endif
453139f0247dSAndreas Gruenbacher };
453239f0247dSAndreas Gruenbacher
453392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
4534f7cd16a5SXavier Roche .getattr = shmem_getattr,
4535b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4536b09e0fa4SEric Paris .listxattr = shmem_listxattr,
4537b09e0fa4SEric Paris #endif
453839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
453994c1e62dSHugh Dickins .setattr = shmem_setattr,
4540feda821eSChristoph Hellwig .set_acl = simple_set_acl,
454139f0247dSAndreas Gruenbacher #endif
45421da177e4SLinus Torvalds };
45431da177e4SLinus Torvalds
4544759b9775SHugh Dickins static const struct super_operations shmem_ops = {
45451da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode,
454674b1da56SAl Viro .free_inode = shmem_free_in_core_inode,
45471da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode,
45481da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
45491da177e4SLinus Torvalds .statfs = shmem_statfs,
4550680d794bSakpm@linux-foundation.org .show_options = shmem_show_options,
45511da177e4SLinus Torvalds #endif
4552e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
4553e09764cfSCarlos Maiolino .get_dquots = shmem_get_dquots,
4554e09764cfSCarlos Maiolino #endif
45551f895f75SAl Viro .evict_inode = shmem_evict_inode,
45561da177e4SLinus Torvalds .drop_inode = generic_delete_inode,
45571da177e4SLinus Torvalds .put_super = shmem_put_super,
4558396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4559779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count,
4560779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan,
4561779750d2SKirill A. Shutemov #endif
45621da177e4SLinus Torvalds };
45631da177e4SLinus Torvalds
4564f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
456554cb8821SNick Piggin .fault = shmem_fault,
4566d7c17551SNing Qu .map_pages = filemap_map_pages,
45671da177e4SLinus Torvalds #ifdef CONFIG_NUMA
45681da177e4SLinus Torvalds .set_policy = shmem_set_policy,
45691da177e4SLinus Torvalds .get_policy = shmem_get_policy,
45701da177e4SLinus Torvalds #endif
45711da177e4SLinus Torvalds };
45721da177e4SLinus Torvalds
4573d09e8ca6SPasha Tatashin static const struct vm_operations_struct shmem_anon_vm_ops = {
4574d09e8ca6SPasha Tatashin .fault = shmem_fault,
4575d09e8ca6SPasha Tatashin .map_pages = filemap_map_pages,
4576d09e8ca6SPasha Tatashin #ifdef CONFIG_NUMA
4577d09e8ca6SPasha Tatashin .set_policy = shmem_set_policy,
4578d09e8ca6SPasha Tatashin .get_policy = shmem_get_policy,
4579d09e8ca6SPasha Tatashin #endif
4580d09e8ca6SPasha Tatashin };
4581d09e8ca6SPasha Tatashin
4582f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
45831da177e4SLinus Torvalds {
4584f3235626SDavid Howells struct shmem_options *ctx;
4585f3235626SDavid Howells
4586f3235626SDavid Howells ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4587f3235626SDavid Howells if (!ctx)
4588f3235626SDavid Howells return -ENOMEM;
4589f3235626SDavid Howells
4590f3235626SDavid Howells ctx->mode = 0777 | S_ISVTX;
4591f3235626SDavid Howells ctx->uid = current_fsuid();
4592f3235626SDavid Howells ctx->gid = current_fsgid();
4593f3235626SDavid Howells
4594f3235626SDavid Howells fc->fs_private = ctx;
4595f3235626SDavid Howells fc->ops = &shmem_fs_context_ops;
4596f3235626SDavid Howells return 0;
45971da177e4SLinus Torvalds }
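/*
 * Note: the 0777 | S_ISVTX default gives a fresh tmpfs instance the
 * classic mode-1777 sticky, world-writable root (as for /tmp), unless
 * overridden with the mode= mount option.
 */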
45981da177e4SLinus Torvalds
459941ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
46001da177e4SLinus Torvalds .owner = THIS_MODULE,
46011da177e4SLinus Torvalds .name = "tmpfs",
4602f3235626SDavid Howells .init_fs_context = shmem_init_fs_context,
4603f3235626SDavid Howells #ifdef CONFIG_TMPFS
4604d7167b14SAl Viro .parameters = shmem_fs_parameters,
4605f3235626SDavid Howells #endif
46061da177e4SLinus Torvalds .kill_sb = kill_litter_super,
46077a80e5b8SGiuseppe Scrivano #ifdef CONFIG_SHMEM
4608db58b5eeSChristian Brauner .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
46097a80e5b8SGiuseppe Scrivano #else
4610ff36da69SMatthew Wilcox (Oracle) .fs_flags = FS_USERNS_MOUNT,
46117a80e5b8SGiuseppe Scrivano #endif
46121da177e4SLinus Torvalds };
46131da177e4SLinus Torvalds
46149096bbe9SMiaohe Lin void __init shmem_init(void)
46151da177e4SLinus Torvalds {
46161da177e4SLinus Torvalds int error;
46171da177e4SLinus Torvalds
46189a8ec03eSweiping zhang shmem_init_inodecache();
46191da177e4SLinus Torvalds
4620e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
4621e09764cfSCarlos Maiolino error = register_quota_format(&shmem_quota_format);
4622e09764cfSCarlos Maiolino if (error < 0) {
4623e09764cfSCarlos Maiolino pr_err("Could not register quota format\n");
4624e09764cfSCarlos Maiolino goto out3;
4625e09764cfSCarlos Maiolino }
4626e09764cfSCarlos Maiolino #endif
4627e09764cfSCarlos Maiolino
462841ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type);
46291da177e4SLinus Torvalds if (error) {
46301170532bSJoe Perches pr_err("Could not register tmpfs\n");
46311da177e4SLinus Torvalds goto out2;
46321da177e4SLinus Torvalds }
463395dc112aSGreg Kroah-Hartman
4634ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type);
46351da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) {
46361da177e4SLinus Torvalds error = PTR_ERR(shm_mnt);
46371170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n");
46381da177e4SLinus Torvalds goto out1;
46391da177e4SLinus Torvalds }
46405a6e75f8SKirill A. Shutemov
4641396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4642435c0b87SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
46435a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
46445a6e75f8SKirill A. Shutemov else
46455e6e5a12SHugh Dickins shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
46465a6e75f8SKirill A. Shutemov #endif
46479096bbe9SMiaohe Lin return;
46481da177e4SLinus Torvalds
46491da177e4SLinus Torvalds out1:
465041ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type);
46511da177e4SLinus Torvalds out2:
4652e09764cfSCarlos Maiolino #ifdef CONFIG_TMPFS_QUOTA
4653e09764cfSCarlos Maiolino unregister_quota_format(&shmem_quota_format);
4654e09764cfSCarlos Maiolino out3:
4655e09764cfSCarlos Maiolino #endif
465641ffe5d5SHugh Dickins shmem_destroy_inodecache();
46571da177e4SLinus Torvalds shm_mnt = ERR_PTR(error);
46581da177e4SLinus Torvalds }
4659853ac43aSMatt Mackall
4660396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
46615a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
46625a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf)
46635a6e75f8SKirill A. Shutemov {
466426083eb6SColin Ian King static const int values[] = {
46655a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS,
46665a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE,
46675a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE,
46685a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER,
46695a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY,
46705a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE,
46715a6e75f8SKirill A. Shutemov };
467279d4d38aSJoe Perches int len = 0;
467379d4d38aSJoe Perches int i;
46745a6e75f8SKirill A. Shutemov
467579d4d38aSJoe Perches for (i = 0; i < ARRAY_SIZE(values); i++) {
467679d4d38aSJoe Perches len += sysfs_emit_at(buf, len,
467779d4d38aSJoe Perches shmem_huge == values[i] ? "%s[%s]" : "%s%s",
467879d4d38aSJoe Perches i ? " " : "",
46795a6e75f8SKirill A. Shutemov shmem_format_huge(values[i]));
46805a6e75f8SKirill A. Shutemov }
468179d4d38aSJoe Perches
468279d4d38aSJoe Perches len += sysfs_emit_at(buf, len, "\n");
468379d4d38aSJoe Perches
468479d4d38aSJoe Perches return len;
46855a6e75f8SKirill A. Shutemov }
46865a6e75f8SKirill A. Shutemov
46875a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
46885a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count)
46895a6e75f8SKirill A. Shutemov {
46905a6e75f8SKirill A. Shutemov char tmp[16];
46915a6e75f8SKirill A. Shutemov int huge;
46925a6e75f8SKirill A. Shutemov
46935a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp))
46945a6e75f8SKirill A. Shutemov return -EINVAL;
46955a6e75f8SKirill A. Shutemov memcpy(tmp, buf, count);
46965a6e75f8SKirill A. Shutemov tmp[count] = '\0';
46975a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n')
46985a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0';
46995a6e75f8SKirill A. Shutemov
47005a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp);
47015a6e75f8SKirill A. Shutemov if (huge == -EINVAL)
47025a6e75f8SKirill A. Shutemov return -EINVAL;
47035a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() &&
47045a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
47055a6e75f8SKirill A. Shutemov return -EINVAL;
47065a6e75f8SKirill A. Shutemov
47075a6e75f8SKirill A. Shutemov shmem_huge = huge;
4708435c0b87SKirill A. Shutemov if (shmem_huge > SHMEM_HUGE_DENY)
47095a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
47105a6e75f8SKirill A. Shutemov return count;
47115a6e75f8SKirill A. Shutemov }
47125a6e75f8SKirill A. Shutemov
47134bfa8adaSMiaohe Lin struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
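/*
 * Usage sketch: this attribute appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, so e.g.
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * updates the global shmem_huge (and the internal shm_mnt default) at
 * runtime; "force" and "deny" override the per-mount setting everywhere
 * and are intended for testing.
 */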
4714396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
4715f3f0e1d2SKirill A. Shutemov
4716853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4717853ac43aSMatt Mackall
4718853ac43aSMatt Mackall /*
4719853ac43aSMatt Mackall * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4720853ac43aSMatt Mackall *
4721853ac43aSMatt Mackall * This is intended for small systems where the benefits of the full
4722853ac43aSMatt Mackall * shmem code (swap-backed and resource-limited) are outweighed by
4723853ac43aSMatt Mackall * their complexity. On systems without swap this code should be
4724853ac43aSMatt Mackall * effectively equivalent, but much lighter weight.
4725853ac43aSMatt Mackall */
4726853ac43aSMatt Mackall
472741ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4728853ac43aSMatt Mackall .name = "tmpfs",
4729f3235626SDavid Howells .init_fs_context = ramfs_init_fs_context,
4730d7167b14SAl Viro .parameters = ramfs_fs_parameters,
473136ce9d76SRoberto Sassu .kill_sb = ramfs_kill_sb,
47322b8576cbSEric W. Biederman .fs_flags = FS_USERNS_MOUNT,
4733853ac43aSMatt Mackall };
4734853ac43aSMatt Mackall
47359096bbe9SMiaohe Lin void __init shmem_init(void)
4736853ac43aSMatt Mackall {
473741ffe5d5SHugh Dickins BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4738853ac43aSMatt Mackall
473941ffe5d5SHugh Dickins shm_mnt = kern_mount(&shmem_fs_type);
4740853ac43aSMatt Mackall BUG_ON(IS_ERR(shm_mnt));
4741853ac43aSMatt Mackall }
4742853ac43aSMatt Mackall
474310a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
4744853ac43aSMatt Mackall {
4745853ac43aSMatt Mackall return 0;
4746853ac43aSMatt Mackall }
4747853ac43aSMatt Mackall
4748d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
47493f96b79aSHugh Dickins {
47503f96b79aSHugh Dickins return 0;
47513f96b79aSHugh Dickins }
47523f96b79aSHugh Dickins
475324513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
475424513264SHugh Dickins {
475524513264SHugh Dickins }
475624513264SHugh Dickins
4757c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4758c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4759c01d5b30SHugh Dickins unsigned long addr, unsigned long len,
4760c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags)
4761c01d5b30SHugh Dickins {
4762c01d5b30SHugh Dickins return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4763c01d5b30SHugh Dickins }
4764c01d5b30SHugh Dickins #endif
4765c01d5b30SHugh Dickins
476641ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
476794c1e62dSHugh Dickins {
476841ffe5d5SHugh Dickins truncate_inode_pages_range(inode->i_mapping, lstart, lend);
476994c1e62dSHugh Dickins }
477094c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
477194c1e62dSHugh Dickins
4772853ac43aSMatt Mackall #define shmem_vm_ops generic_file_vm_ops
4773d09e8ca6SPasha Tatashin #define shmem_anon_vm_ops generic_file_vm_ops
47740b0a0806SHugh Dickins #define shmem_file_operations ramfs_file_operations
47750b0a0806SHugh Dickins #define shmem_acct_size(flags, size) 0
47760b0a0806SHugh Dickins #define shmem_unacct_size(flags, size) do {} while (0)
4777853ac43aSMatt Mackall
477871480663SCarlos Maiolino static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
477971480663SCarlos Maiolino umode_t mode, dev_t dev, unsigned long flags)
478071480663SCarlos Maiolino {
478171480663SCarlos Maiolino struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
478271480663SCarlos Maiolino return inode ? inode : ERR_PTR(-ENOSPC);
478371480663SCarlos Maiolino }
478471480663SCarlos Maiolino
4785853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4786853ac43aSMatt Mackall
4787853ac43aSMatt Mackall /* common code */
47881da177e4SLinus Torvalds
4789703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4790c7277090SEric Paris unsigned long flags, unsigned int i_flags)
47911da177e4SLinus Torvalds {
47921da177e4SLinus Torvalds struct inode *inode;
479393dec2daSAl Viro struct file *res;
47941da177e4SLinus Torvalds
4795703321b6SMatthew Auld if (IS_ERR(mnt))
4796703321b6SMatthew Auld return ERR_CAST(mnt);
47971da177e4SLinus Torvalds
4798285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE)
47991da177e4SLinus Torvalds return ERR_PTR(-EINVAL);
48001da177e4SLinus Torvalds
48011da177e4SLinus Torvalds if (shmem_acct_size(flags, size))
48021da177e4SLinus Torvalds return ERR_PTR(-ENOMEM);
48031da177e4SLinus Torvalds
48047a80e5b8SGiuseppe Scrivano if (is_idmapped_mnt(mnt))
48057a80e5b8SGiuseppe Scrivano return ERR_PTR(-EINVAL);
48067a80e5b8SGiuseppe Scrivano
48077a80e5b8SGiuseppe Scrivano inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
48087a80e5b8SGiuseppe Scrivano S_IFREG | S_IRWXUGO, 0, flags);
480971480663SCarlos Maiolino
481071480663SCarlos Maiolino if (IS_ERR(inode)) {
4811dac2d1f6SAl Viro shmem_unacct_size(flags, size);
481271480663SCarlos Maiolino return ERR_CAST(inode);
4813dac2d1f6SAl Viro }
4814c7277090SEric Paris inode->i_flags |= i_flags;
48151da177e4SLinus Torvalds inode->i_size = size;
48166d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */
481726567cdbSAl Viro res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
481893dec2daSAl Viro if (!IS_ERR(res))
481993dec2daSAl Viro res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
48204b42af81SAl Viro &shmem_file_operations);
48216b4d0b27SAl Viro if (IS_ERR(res))
482293dec2daSAl Viro iput(inode);
48236b4d0b27SAl Viro return res;
48241da177e4SLinus Torvalds }
4825c7277090SEric Paris
4826c7277090SEric Paris /**
4827c7277090SEric Paris * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4828c7277090SEric Paris * kernel internal. There will be NO LSM permission checks against the
4829c7277090SEric Paris * underlying inode. So users of this interface must do LSM checks at a
4830e1832f29SStephen Smalley * higher layer. The users are the big_key and shm implementations. LSM
4831e1832f29SStephen Smalley * checks are provided at the key or shm level rather than the inode.
4832c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps)
4833c7277090SEric Paris * @size: size to be set for the file
4834c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4835c7277090SEric Paris */
4836c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4837c7277090SEric Paris {
4838703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4839c7277090SEric Paris }
4840c7277090SEric Paris
4841c7277090SEric Paris /**
4842c7277090SEric Paris * shmem_file_setup - get an unlinked file living in tmpfs
4843c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps)
4844c7277090SEric Paris * @size: size to be set for the file
4845c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4846c7277090SEric Paris */
4847c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4848c7277090SEric Paris {
4849703321b6SMatthew Auld return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4850c7277090SEric Paris }
4851395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
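/*
 * In-kernel usage sketch (illustrative; "my-buffer" is a made-up name):
 * a driver wanting an unlinked, swap-backed file can do
 *
 *	struct file *file = shmem_file_setup("my-buffer", SZ_1M, 0);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);
 *
 * The name only labels the dentry for /proc/<pid>/maps; the file has no
 * path and is freed on the final fput().
 */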
48521da177e4SLinus Torvalds
485346711810SRandy Dunlap /**
4854703321b6SMatthew Auld * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4855703321b6SMatthew Auld * @mnt: the tmpfs mount where the file will be created
4856703321b6SMatthew Auld * @name: name for dentry (to be seen in /proc/<pid>/maps)
4857703321b6SMatthew Auld * @size: size to be set for the file
4858703321b6SMatthew Auld * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4859703321b6SMatthew Auld */
4860703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4861703321b6SMatthew Auld loff_t size, unsigned long flags)
4862703321b6SMatthew Auld {
4863703321b6SMatthew Auld return __shmem_file_setup(mnt, name, size, flags, 0);
4864703321b6SMatthew Auld }
4865703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4866703321b6SMatthew Auld
4867703321b6SMatthew Auld /**
48681da177e4SLinus Torvalds * shmem_zero_setup - setup a shared anonymous mapping
486945e55300SPeter Collingbourne * @vma: the vma to be mmapped is prepared by do_mmap
48701da177e4SLinus Torvalds */
48711da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
48721da177e4SLinus Torvalds {
48731da177e4SLinus Torvalds struct file *file;
48741da177e4SLinus Torvalds loff_t size = vma->vm_end - vma->vm_start;
48751da177e4SLinus Torvalds
487666fc1303SHugh Dickins /*
4877c1e8d7c6SMichel Lespinasse * Cloning a new file under mmap_lock leads to a lock ordering conflict
487866fc1303SHugh Dickins * between XFS directory reading and selinux: since this file is only
487966fc1303SHugh Dickins * accessible to the user through its mapping, use S_PRIVATE flag to
488066fc1303SHugh Dickins * bypass file security, in the same way as shmem_kernel_file_setup().
488166fc1303SHugh Dickins */
4882703321b6SMatthew Auld file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
48831da177e4SLinus Torvalds if (IS_ERR(file))
48841da177e4SLinus Torvalds return PTR_ERR(file);
48851da177e4SLinus Torvalds
48861da177e4SLinus Torvalds if (vma->vm_file)
48871da177e4SLinus Torvalds fput(vma->vm_file);
48881da177e4SLinus Torvalds vma->vm_file = file;
4889d09e8ca6SPasha Tatashin vma->vm_ops = &shmem_anon_vm_ops;
4890f3f0e1d2SKirill A. Shutemov
48911da177e4SLinus Torvalds return 0;
48921da177e4SLinus Torvalds }
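/*
 * Illustrative context: this is what backs shared anonymous mappings,
 * e.g.
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * arrives here from mmap_region(), and the vma ends up with an unlinked
 * "dev/zero" shmem file sized to the mapping as its vm_file.
 */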
4893d9d90e5eSHugh Dickins
4894d9d90e5eSHugh Dickins /**
4895f01b2b3eSMatthew Wilcox (Oracle) * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4896f01b2b3eSMatthew Wilcox (Oracle) * @mapping: the folio's address_space
4897f01b2b3eSMatthew Wilcox (Oracle) * @index: the folio index
4898d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating
4899d9d90e5eSHugh Dickins *
4900d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4901d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags.
49027e0a1265SMatthew Wilcox (Oracle) * But read_cache_page_gfp() uses the ->read_folio() method, which does not
4903d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those
4904d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4905d9d90e5eSHugh Dickins *
490668da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
490768da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4908d9d90e5eSHugh Dickins */
4909f01b2b3eSMatthew Wilcox (Oracle) struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4910d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp)
4911d9d90e5eSHugh Dickins {
491268da9f05SHugh Dickins #ifdef CONFIG_SHMEM
491368da9f05SHugh Dickins struct inode *inode = mapping->host;
4914a3a9c397SMatthew Wilcox (Oracle) struct folio *folio;
491568da9f05SHugh Dickins int error;
491668da9f05SHugh Dickins
491730e6a51dSHui Su BUG_ON(!shmem_mapping(mapping));
4918a3a9c397SMatthew Wilcox (Oracle) error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4919cfda0526SMike Rapoport gfp, NULL, NULL, NULL);
492068da9f05SHugh Dickins if (error)
4921a7605426SYang Shi return ERR_PTR(error);
4922a7605426SYang Shi
4923a3a9c397SMatthew Wilcox (Oracle) folio_unlock(folio);
4924f01b2b3eSMatthew Wilcox (Oracle) return folio;
4925f01b2b3eSMatthew Wilcox (Oracle) #else
4926f01b2b3eSMatthew Wilcox (Oracle) /*
4927f01b2b3eSMatthew Wilcox (Oracle) * The tiny !SHMEM case uses ramfs without swap
4928f01b2b3eSMatthew Wilcox (Oracle) */
4929f01b2b3eSMatthew Wilcox (Oracle) return mapping_read_folio_gfp(mapping, index, gfp);
4930f01b2b3eSMatthew Wilcox (Oracle) #endif
4931f01b2b3eSMatthew Wilcox (Oracle) }
4932f01b2b3eSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
4933f01b2b3eSMatthew Wilcox (Oracle)
4934f01b2b3eSMatthew Wilcox (Oracle) struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4935f01b2b3eSMatthew Wilcox (Oracle) pgoff_t index, gfp_t gfp)
4936f01b2b3eSMatthew Wilcox (Oracle) {
4937f01b2b3eSMatthew Wilcox (Oracle) struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4938f01b2b3eSMatthew Wilcox (Oracle) struct page *page;
4939f01b2b3eSMatthew Wilcox (Oracle)
4940f01b2b3eSMatthew Wilcox (Oracle) if (IS_ERR(folio))
4941f01b2b3eSMatthew Wilcox (Oracle) return &folio->page;
4942f01b2b3eSMatthew Wilcox (Oracle)
4943a3a9c397SMatthew Wilcox (Oracle) page = folio_file_page(folio, index);
4944a7605426SYang Shi if (PageHWPoison(page)) {
4945a3a9c397SMatthew Wilcox (Oracle) folio_put(folio);
4946a7605426SYang Shi return ERR_PTR(-EIO);
4947a7605426SYang Shi }
4948a7605426SYang Shi
494968da9f05SHugh Dickins return page;
4950d9d90e5eSHugh Dickins }
4951d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
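/*
 * Usage sketch in the spirit of the i915/ttm callers noted above
 * (illustrative only):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * The page is returned uptodate, unlocked and with a reference held;
 * drop it with put_page() when done.
 */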