/* xref: /openbmc/linux/mm/shmem.c (revision 3d2c908768877714a354ee6d7bf93e801400d5e2) */
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
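
/*
 * Worked example (illustrative, assuming 4 KiB pages): BLOCKS_PER_PAGE is
 * 4096/512 = 8, matching the 512-byte units of inode->i_blocks; and
 * VM_ACCT(5000) page-aligns 5000 up to 8192 and shifts down to 2, the
 * page count charged via the vm_enough_memory accounting helpers below.
 */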

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
};
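
/*
 * Note (added for clarity): "seen" is a bitmask of the SHMEM_SEEN_* flags
 * above, recording which options the mount string actually supplied; e.g.
 * after parsing "size=1G,nr_inodes=100" it would hold
 * SHMEM_SEEN_BLOCKS | SHMEM_SEEN_INODES, so a remount can leave the
 * unmentioned settings untouched.
 */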

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			     struct folio **foliop, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
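
/*
 * Worked example for shmem_reacct_size() (illustrative, 4 KiB pages):
 * growing a reserving object from 20000 bytes (5 pages) to 40000 bytes
 * (10 pages) charges 5 more pages against the overcommit limit, while
 * shrinking back uncharges them, so only the delta is ever (un)accounted.
 */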

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}
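
/*
 * Note (added for clarity): shmem_inode_acct_block() enforces two limits.
 * shmem_acct_block() charges the global overcommit pool, but only for
 * VM_NORESERVE objects whose size was not pre-accounted at setup; the
 * used_blocks percpu counter then enforces the per-mount "size=" limit.
 * E.g. with size=1G and 4 KiB pages, max_blocks is 262144, and a request
 * that would exceed it fails here, which callers report as -ENOSPC.
 */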

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
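
/*
 * Illustrative example of the SB_KERNMOUNT path above (assuming the
 * per-cpu cursors start at zero): each CPU refills its cursor from
 * sbinfo->next_ino in chunks of SHMEM_INO_BATCH, so the first batch hands
 * out inos 1..1023 (ino 0 is skipped), the next refill covers 1024..2047,
 * and stat_lock is taken only once per 1024 allocations rather than on
 * every inode.
 */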

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
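
/*
 * Worked example for shmem_recalc_inode() (illustrative): with
 * info->alloced == 10, info->swapped == 2 and nrpages == 6, the mm has
 * reclaimed 10 - 2 - 6 = 2 clean hole pages behind our back, so 2 pages
 * are uncharged and i_blocks drops by 2 * BLOCKS_PER_PAGE.
 */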

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
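
/*
 * Note (added for clarity): a swapped-out shmem page keeps its page cache
 * slot, which holds the swap entry encoded as an XArray value entry via
 * swp_to_radix_entry(). shmem_confirm_swap() therefore only reloads the
 * slot with xa_load() and compares it against the expected encoded entry,
 * which is safe under RCU without taking the xa_lock.
 */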

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}
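
/*
 * Worked example of the within_size case (illustrative, x86-64 with
 * 4 KiB pages and a 2 MiB PMD, so HPAGE_PMD_NR == 512): for index 0,
 * round_up(1, 512) is 512, so a huge page is allowed only if i_size
 * covers at least 512 pages (2 MiB); for index 512 the requirement
 * becomes 4 MiB, otherwise the check falls through to SHMEM_HUGE_ADVISE.
 */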

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (IS_ERR(folio))
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
		   struct mm_struct *mm, unsigned long vm_flags)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

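	/*
	 * Note on the loop below (added for clarity): under xas_lock_irq(),
	 * the first xas_find_conflict() fails the insert unless the slot
	 * still holds the expected entry (e.g. the swap entry we are
	 * replacing); the second catches any further entry within a
	 * multi-index range. If xas_store() hit -ENOMEM, xas_nomem() drops
	 * the lock, allocates a node with gfp, and returns true to retry
	 * the whole transaction.
	 */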
	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;
	unsigned long max = end - 1;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;
		if (xas.xa_index == max)
			break;
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
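
/*
 * Note on shmem_partial_swap_usage() (added for clarity): only value
 * entries, i.e. swap entries, are counted; present folios are skipped.
 * The count is per page-cache slot, so it is converted to bytes with
 * << PAGE_SHIFT; e.g. 3 swapped-out pages report 12288 bytes with
 * 4 KiB pages.
 */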

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated folios as holes.
	 */
	folio = filemap_get_entry(inode->i_mapping, index);
	if (!folio)
		return folio;
	if (!xa_is_value(folio)) {
		folio_lock(folio);
		if (folio->mapping == inode->i_mapping)
			return folio;
		/* The folio has been swapped out */
		folio_unlock(folio);
		folio_put(folio);
	}
	/*
	 * But read a folio back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */
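	/*
	 * Illustrative example (assuming 4 KiB pages): punching the hole
	 * lstart=5000, lend=12287 yields start=2 and end=3, so only page
	 * index 2 is truncated whole by the batch loop below; the partially
	 * covered page at index 1 is dirtied and zeroed by the partial-folio
	 * handling further down.
	 */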
932bda97eabSHugh Dickins 
933d144bf62SHugh Dickins 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
934d144bf62SHugh Dickins 		info->fallocend = start;
935d144bf62SHugh Dickins 
93651dcbdacSMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
937bda97eabSHugh Dickins 	index = start;
9383392ca12SVishal Moola (Oracle) 	while (index < end && find_lock_entries(mapping, &index, end - 1,
93951dcbdacSMatthew Wilcox (Oracle) 			&fbatch, indices)) {
94051dcbdacSMatthew Wilcox (Oracle) 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
941b9a8a419SMatthew Wilcox (Oracle) 			folio = fbatch.folios[i];
942bda97eabSHugh Dickins 
9437b774aabSMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
9441635f6a7SHugh Dickins 				if (unfalloc)
9451635f6a7SHugh Dickins 					continue;
9467a5d0fbbSHugh Dickins 				nr_swaps_freed += !shmem_free_swap(mapping,
9473392ca12SVishal Moola (Oracle) 							indices[i], folio);
9487a5d0fbbSHugh Dickins 				continue;
9497a5d0fbbSHugh Dickins 			}
9507a5d0fbbSHugh Dickins 
9517b774aabSMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio))
9521e84a3d9SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
9537b774aabSMatthew Wilcox (Oracle) 			folio_unlock(folio);
954bda97eabSHugh Dickins 		}
95551dcbdacSMatthew Wilcox (Oracle) 		folio_batch_remove_exceptionals(&fbatch);
95651dcbdacSMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
957bda97eabSHugh Dickins 		cond_resched();
958bda97eabSHugh Dickins 	}
959bda97eabSHugh Dickins 
96044bcabd7SHugh Dickins 	/*
96144bcabd7SHugh Dickins 	 * When undoing a failed fallocate, we want none of the partial folio
96244bcabd7SHugh Dickins 	 * zeroing and splitting below, but shall want to truncate the whole
96344bcabd7SHugh Dickins 	 * folio when !uptodate indicates that it was added by this fallocate,
96444bcabd7SHugh Dickins 	 * even when [lstart, lend] covers only a part of the folio.
96544bcabd7SHugh Dickins 	 */
96644bcabd7SHugh Dickins 	if (unfalloc)
96744bcabd7SHugh Dickins 		goto whole_folios;
96844bcabd7SHugh Dickins 
969b9a8a419SMatthew Wilcox (Oracle) 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
970b9a8a419SMatthew Wilcox (Oracle) 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
971b9a8a419SMatthew Wilcox (Oracle) 	if (folio) {
972b9a8a419SMatthew Wilcox (Oracle) 		same_folio = lend < folio_pos(folio) + folio_size(folio);
973b9a8a419SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
974b9a8a419SMatthew Wilcox (Oracle) 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
97587b11f86SSidhartha Kumar 			start = folio_next_index(folio);
976b9a8a419SMatthew Wilcox (Oracle) 			if (same_folio)
977b9a8a419SMatthew Wilcox (Oracle) 				end = folio->index;
97883e4fa9cSHugh Dickins 		}
979b9a8a419SMatthew Wilcox (Oracle) 		folio_unlock(folio);
980b9a8a419SMatthew Wilcox (Oracle) 		folio_put(folio);
981b9a8a419SMatthew Wilcox (Oracle) 		folio = NULL;
982bda97eabSHugh Dickins 	}
983b9a8a419SMatthew Wilcox (Oracle) 
984b9a8a419SMatthew Wilcox (Oracle) 	if (!same_folio)
985b9a8a419SMatthew Wilcox (Oracle) 		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
986b9a8a419SMatthew Wilcox (Oracle) 	if (folio) {
987b9a8a419SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
988b9a8a419SMatthew Wilcox (Oracle) 		if (!truncate_inode_partial_folio(folio, lstart, lend))
989b9a8a419SMatthew Wilcox (Oracle) 			end = folio->index;
990b9a8a419SMatthew Wilcox (Oracle) 		folio_unlock(folio);
991b9a8a419SMatthew Wilcox (Oracle) 		folio_put(folio);
992bda97eabSHugh Dickins 	}
993bda97eabSHugh Dickins 
99444bcabd7SHugh Dickins whole_folios:
99544bcabd7SHugh Dickins 
996bda97eabSHugh Dickins 	index = start;
997b1a36650SHugh Dickins 	while (index < end) {
998bda97eabSHugh Dickins 		cond_resched();
9990cd6144aSJohannes Weiner 
10009fb6beeaSVishal Moola (Oracle) 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1001cf2039afSMatthew Wilcox (Oracle) 				indices)) {
1002b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
1003b1a36650SHugh Dickins 			if (index == start || end != -1)
1004bda97eabSHugh Dickins 				break;
1005b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
1006bda97eabSHugh Dickins 			index = start;
1007bda97eabSHugh Dickins 			continue;
1008bda97eabSHugh Dickins 		}
10090e499ed3SMatthew Wilcox (Oracle) 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1010b9a8a419SMatthew Wilcox (Oracle) 			folio = fbatch.folios[i];
1011bda97eabSHugh Dickins 
10120e499ed3SMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
10131635f6a7SHugh Dickins 				if (unfalloc)
10141635f6a7SHugh Dickins 					continue;
10159fb6beeaSVishal Moola (Oracle) 				if (shmem_free_swap(mapping, indices[i], folio)) {
1016b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
10179fb6beeaSVishal Moola (Oracle) 					index = indices[i];
1018b1a36650SHugh Dickins 					break;
1019b1a36650SHugh Dickins 				}
1020b1a36650SHugh Dickins 				nr_swaps_freed++;
10217a5d0fbbSHugh Dickins 				continue;
10227a5d0fbbSHugh Dickins 			}
10237a5d0fbbSHugh Dickins 
10240e499ed3SMatthew Wilcox (Oracle) 			folio_lock(folio);
1025800d8c63SKirill A. Shutemov 
10260e499ed3SMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio)) {
10270e499ed3SMatthew Wilcox (Oracle) 				if (folio_mapping(folio) != mapping) {
1028b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
10290e499ed3SMatthew Wilcox (Oracle) 					folio_unlock(folio);
10309fb6beeaSVishal Moola (Oracle) 					index = indices[i];
1031b1a36650SHugh Dickins 					break;
10327a5d0fbbSHugh Dickins 				}
10330e499ed3SMatthew Wilcox (Oracle) 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
10340e499ed3SMatthew Wilcox (Oracle) 						folio);
10350e499ed3SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
103671725ed1SHugh Dickins 			}
10370e499ed3SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1038bda97eabSHugh Dickins 		}
10390e499ed3SMatthew Wilcox (Oracle) 		folio_batch_remove_exceptionals(&fbatch);
10400e499ed3SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
1041bda97eabSHugh Dickins 	}
104294c1e62dSHugh Dickins 
10434595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10447a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10451da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10464595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10471635f6a7SHugh Dickins }
10481da177e4SLinus Torvalds 
10491635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10501635f6a7SHugh Dickins {
10511635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1052078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
105336f05cabSJeff Layton 	inode_inc_iversion(inode);
10541da177e4SLinus Torvalds }
105594c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
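
/*
 * Usage sketch (illustrative; offsets are byte-granular and lend is the
 * inclusive last byte): shmem_evict_inode() below drops everything with
 *
 *	shmem_truncate_range(inode, 0, (loff_t)-1);
 *
 * while a hole punch over [offset, offset + len) passes
 * shmem_truncate_range(inode, offset, offset + len - 1), as
 * shmem_fallocate() does; lend == -1 means "to EOF".
 */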
10561da177e4SLinus Torvalds 
1057b74d24f7SChristian Brauner static int shmem_getattr(struct mnt_idmap *idmap,
1058549c7297SChristian Brauner 			 const struct path *path, struct kstat *stat,
1059a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
106044a30220SYu Zhao {
1061a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
106244a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
106344a30220SYu Zhao 
1064d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10654595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
106644a30220SYu Zhao 		shmem_recalc_inode(inode);
10674595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1068d0424c42SHugh Dickins 	}
1069e408e695STheodore Ts'o 	if (info->fsflags & FS_APPEND_FL)
1070e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_APPEND;
1071e408e695STheodore Ts'o 	if (info->fsflags & FS_IMMUTABLE_FL)
1072e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_IMMUTABLE;
1073e408e695STheodore Ts'o 	if (info->fsflags & FS_NODUMP_FL)
1074e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_NODUMP;
1075e408e695STheodore Ts'o 	stat->attributes_mask |= (STATX_ATTR_APPEND |
1076e408e695STheodore Ts'o 			STATX_ATTR_IMMUTABLE |
1077e408e695STheodore Ts'o 			STATX_ATTR_NODUMP);
10787a80e5b8SGiuseppe Scrivano 	generic_fillattr(idmap, inode, stat);
107989fdcd26SYang Shi 
10802cf13384SDavid Stevens 	if (shmem_is_huge(inode, 0, false, NULL, 0))
108189fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
108289fdcd26SYang Shi 
1083f7cd16a5SXavier Roche 	if (request_mask & STATX_BTIME) {
1084f7cd16a5SXavier Roche 		stat->result_mask |= STATX_BTIME;
1085f7cd16a5SXavier Roche 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1086f7cd16a5SXavier Roche 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1087f7cd16a5SXavier Roche 	}
1088f7cd16a5SXavier Roche 
108944a30220SYu Zhao 	return 0;
109044a30220SYu Zhao }
109144a30220SYu Zhao 
1092c1632a0fSChristian Brauner static int shmem_setattr(struct mnt_idmap *idmap,
1093549c7297SChristian Brauner 			 struct dentry *dentry, struct iattr *attr)
10941da177e4SLinus Torvalds {
109575c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
109640e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
10971da177e4SLinus Torvalds 	int error;
109836f05cabSJeff Layton 	bool update_mtime = false;
109936f05cabSJeff Layton 	bool update_ctime = true;
11001da177e4SLinus Torvalds 
11017a80e5b8SGiuseppe Scrivano 	error = setattr_prepare(idmap, dentry, attr);
1102db78b877SChristoph Hellwig 	if (error)
1103db78b877SChristoph Hellwig 		return error;
1104db78b877SChristoph Hellwig 
11056fd73538SDaniel Verkamp 	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
11066fd73538SDaniel Verkamp 		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
11076fd73538SDaniel Verkamp 			return -EPERM;
11086fd73538SDaniel Verkamp 		}
11096fd73538SDaniel Verkamp 	}
11106fd73538SDaniel Verkamp 
111194c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
111294c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
111394c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
11143889e6e7Snpiggin@suse.de 
11159608703eSJan Kara 		/* protected by i_rwsem */
111640e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
111740e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
111840e041a2SDavid Herrmann 			return -EPERM;
111940e041a2SDavid Herrmann 
112094c1e62dSHugh Dickins 		if (newsize != oldsize) {
112177142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
112277142517SKonstantin Khlebnikov 					oldsize, newsize);
112377142517SKonstantin Khlebnikov 			if (error)
112477142517SKonstantin Khlebnikov 				return error;
112594c1e62dSHugh Dickins 			i_size_write(inode, newsize);
112636f05cabSJeff Layton 			update_mtime = true;
112736f05cabSJeff Layton 		} else {
112836f05cabSJeff Layton 			update_ctime = false;
112994c1e62dSHugh Dickins 		}
1130afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
113194c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1132d0424c42SHugh Dickins 			if (oldsize > holebegin)
1133d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1134d0424c42SHugh Dickins 							holebegin, 0, 1);
1135d0424c42SHugh Dickins 			if (info->alloced)
1136d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1137d0424c42SHugh Dickins 							newsize, (loff_t)-1);
113894c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1139d0424c42SHugh Dickins 			if (oldsize > holebegin)
1140d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1141d0424c42SHugh Dickins 							holebegin, 0, 1);
114294c1e62dSHugh Dickins 		}
11431da177e4SLinus Torvalds 	}
11441da177e4SLinus Torvalds 
11457a80e5b8SGiuseppe Scrivano 	setattr_copy(idmap, inode, attr);
1146db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
11477a80e5b8SGiuseppe Scrivano 		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
114836f05cabSJeff Layton 	if (!error && update_ctime) {
114936f05cabSJeff Layton 		inode->i_ctime = current_time(inode);
115036f05cabSJeff Layton 		if (update_mtime)
115136f05cabSJeff Layton 			inode->i_mtime = inode->i_ctime;
115236f05cabSJeff Layton 		inode_inc_iversion(inode);
115336f05cabSJeff Layton 	}
11541da177e4SLinus Torvalds 	return error;
11551da177e4SLinus Torvalds }
11561da177e4SLinus Torvalds 
11571f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11581da177e4SLinus Torvalds {
11591da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1160779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11611da177e4SLinus Torvalds 
116230e6a51dSHui Su 	if (shmem_mapping(inode->i_mapping)) {
11631da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11641da177e4SLinus Torvalds 		inode->i_size = 0;
1165bc786390SHugh Dickins 		mapping_set_exiting(inode->i_mapping);
11663889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1167779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1168779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1169779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1170779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1171779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1172779750d2SKirill A. Shutemov 			}
1173779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1174779750d2SKirill A. Shutemov 		}
1175af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1176af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1177af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1178af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1179cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1180af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1181af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11821da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1183cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11841da177e4SLinus Torvalds 		}
11853ed47db3SAl Viro 	}
1186b09e0fa4SEric Paris 
118738f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11880f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11895b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1190dbd5768fSJan Kara 	clear_inode(inode);
11911da177e4SLinus Torvalds }
11921da177e4SLinus Torvalds 
1193b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1194da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t start, struct folio_batch *fbatch,
1195da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t *indices, unsigned int type)
1196478922e2SMatthew Wilcox {
1197b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1198da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio;
119987039546SHugh Dickins 	swp_entry_t entry;
1200478922e2SMatthew Wilcox 
1201478922e2SMatthew Wilcox 	rcu_read_lock();
1202da08e9b7SMatthew Wilcox (Oracle) 	xas_for_each(&xas, folio, ULONG_MAX) {
1203da08e9b7SMatthew Wilcox (Oracle) 		if (xas_retry(&xas, folio))
12045b9c98f3SMike Kravetz 			continue;
1205b56a2d8aSVineeth Remanan Pillai 
1206da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1207478922e2SMatthew Wilcox 			continue;
1208b56a2d8aSVineeth Remanan Pillai 
1209da08e9b7SMatthew Wilcox (Oracle) 		entry = radix_to_swp_entry(folio);
12106cec2b95SMiaohe Lin 		/*
12116cec2b95SMiaohe Lin 		 * swapin error entries can be found in the mapping. But they're
12126cec2b95SMiaohe Lin 		 * deliberately ignored here as we've done everything we can do.
12136cec2b95SMiaohe Lin 		 */
121487039546SHugh Dickins 		if (swp_type(entry) != type)
1215b56a2d8aSVineeth Remanan Pillai 			continue;
1216b56a2d8aSVineeth Remanan Pillai 
1217e384200eSHugh Dickins 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1218da08e9b7SMatthew Wilcox (Oracle) 		if (!folio_batch_add(fbatch, folio))
1219da08e9b7SMatthew Wilcox (Oracle) 			break;
1220b56a2d8aSVineeth Remanan Pillai 
1221b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1222e21a2955SMatthew Wilcox 			xas_pause(&xas);
1223478922e2SMatthew Wilcox 			cond_resched_rcu();
1224478922e2SMatthew Wilcox 		}
1225b56a2d8aSVineeth Remanan Pillai 	}
1226478922e2SMatthew Wilcox 	rcu_read_unlock();
1227e21a2955SMatthew Wilcox 
1228da08e9b7SMatthew Wilcox (Oracle) 	return xas.xa_index;
1229b56a2d8aSVineeth Remanan Pillai }
1230b56a2d8aSVineeth Remanan Pillai 
1231b56a2d8aSVineeth Remanan Pillai /*
1232b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1233b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1234b56a2d8aSVineeth Remanan Pillai  */
1235da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode,
1236da08e9b7SMatthew Wilcox (Oracle) 		struct folio_batch *fbatch, pgoff_t *indices)
1237b56a2d8aSVineeth Remanan Pillai {
1238b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1239b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1240b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1241b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1242b56a2d8aSVineeth Remanan Pillai 
1243da08e9b7SMatthew Wilcox (Oracle) 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1244da08e9b7SMatthew Wilcox (Oracle) 		struct folio *folio = fbatch->folios[i];
1245b56a2d8aSVineeth Remanan Pillai 
1246da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1247b56a2d8aSVineeth Remanan Pillai 			continue;
1248da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, indices[i],
1249da08e9b7SMatthew Wilcox (Oracle) 					  &folio, SGP_CACHE,
1250b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1251b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1252b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1253da08e9b7SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1254da08e9b7SMatthew Wilcox (Oracle) 			folio_put(folio);
1255b56a2d8aSVineeth Remanan Pillai 			ret++;
1256b56a2d8aSVineeth Remanan Pillai 		}
1257b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1258b56a2d8aSVineeth Remanan Pillai 			break;
1259b56a2d8aSVineeth Remanan Pillai 		error = 0;
1260b56a2d8aSVineeth Remanan Pillai 	}
1261b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1262478922e2SMatthew Wilcox }
1263478922e2SMatthew Wilcox 
126446f65ec1SHugh Dickins /*
126546f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
126646f65ec1SHugh Dickins  */
126710a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type)
12681da177e4SLinus Torvalds {
1269b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1270b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1271da08e9b7SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
1272b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1273b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12741da177e4SLinus Torvalds 
1275b56a2d8aSVineeth Remanan Pillai 	do {
1276da08e9b7SMatthew Wilcox (Oracle) 		folio_batch_init(&fbatch);
1277da08e9b7SMatthew Wilcox (Oracle) 		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1278da08e9b7SMatthew Wilcox (Oracle) 		if (folio_batch_count(&fbatch) == 0) {
1279b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1280778dd893SHugh Dickins 			break;
1281b56a2d8aSVineeth Remanan Pillai 		}
1282b56a2d8aSVineeth Remanan Pillai 
1283da08e9b7SMatthew Wilcox (Oracle) 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1284b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1285b56a2d8aSVineeth Remanan Pillai 			break;
1286b56a2d8aSVineeth Remanan Pillai 
1287da08e9b7SMatthew Wilcox (Oracle) 		start = indices[folio_batch_count(&fbatch) - 1];
1288b56a2d8aSVineeth Remanan Pillai 	} while (true);
1289b56a2d8aSVineeth Remanan Pillai 
1290b56a2d8aSVineeth Remanan Pillai 	return ret;
1291b56a2d8aSVineeth Remanan Pillai }
1292b56a2d8aSVineeth Remanan Pillai 
1293b56a2d8aSVineeth Remanan Pillai /*
1294b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1295b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1296b56a2d8aSVineeth Remanan Pillai  * unused.
1297b56a2d8aSVineeth Remanan Pillai  */
129810a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
1299b56a2d8aSVineeth Remanan Pillai {
1300b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1301b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1302b56a2d8aSVineeth Remanan Pillai 
1303b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1304b56a2d8aSVineeth Remanan Pillai 		return 0;
1305b56a2d8aSVineeth Remanan Pillai 
1306b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1307b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1308b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1309b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1310b56a2d8aSVineeth Remanan Pillai 			continue;
1311b56a2d8aSVineeth Remanan Pillai 		}
1312af53d3e9SHugh Dickins 		/*
1313af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1314af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1315af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1316af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1317af53d3e9SHugh Dickins 		 */
1318af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1319b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1320b56a2d8aSVineeth Remanan Pillai 
132110a9c496SChristoph Hellwig 		error = shmem_unuse_inode(&info->vfs_inode, type);
1322b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1323b56a2d8aSVineeth Remanan Pillai 
1324b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1325b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1326b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1327b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1328af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1329af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1330b56a2d8aSVineeth Remanan Pillai 		if (error)
1331b56a2d8aSVineeth Remanan Pillai 			break;
13321da177e4SLinus Torvalds 	}
1333cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1334778dd893SHugh Dickins 
1335778dd893SHugh Dickins 	return error;
13361da177e4SLinus Torvalds }
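
/*
 * As of this revision, shmem_unuse() is driven by swapoff(2): see
 * try_to_unuse() in mm/swapfile.c, which calls it to bring all shmem
 * pages on the dying swap device back into the page cache before
 * scanning the remaining users of that device.
 */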
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13401da177e4SLinus Torvalds  */
13411da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13421da177e4SLinus Torvalds {
1343e2e3fdc7SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
13448ccee8c1SLuis Chamberlain 	struct address_space *mapping = folio->mapping;
13458ccee8c1SLuis Chamberlain 	struct inode *inode = mapping->host;
13468ccee8c1SLuis Chamberlain 	struct shmem_inode_info *info = SHMEM_I(inode);
13472c6efe9cSLuis Chamberlain 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
13486922c0c7SHugh Dickins 	swp_entry_t swap;
13496922c0c7SHugh Dickins 	pgoff_t index;
13501da177e4SLinus Torvalds 
13511e6decf3SHugh Dickins 	/*
1352cf7992bfSLuis Chamberlain 	 * Our capabilities prevent regular writeback or sync from ever calling
1353cf7992bfSLuis Chamberlain 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1354cf7992bfSLuis Chamberlain 	 * its underlying filesystem, in which case tmpfs should write out to
1355cf7992bfSLuis Chamberlain 	 * swap only in response to memory pressure, and not for the writeback
1356cf7992bfSLuis Chamberlain 	 * threads or sync.
1357cf7992bfSLuis Chamberlain 	 */
1358cf7992bfSLuis Chamberlain 	if (WARN_ON_ONCE(!wbc->for_reclaim))
1359cf7992bfSLuis Chamberlain 		goto redirty;
1360cf7992bfSLuis Chamberlain 
13612c6efe9cSLuis Chamberlain 	if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
13629a976f0cSLuis Chamberlain 		goto redirty;
13639a976f0cSLuis Chamberlain 
13649a976f0cSLuis Chamberlain 	if (!total_swap_pages)
13659a976f0cSLuis Chamberlain 		goto redirty;
13669a976f0cSLuis Chamberlain 
1367cf7992bfSLuis Chamberlain 	/*
13681e6decf3SHugh Dickins 	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
13691e6decf3SHugh Dickins 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
13701e6decf3SHugh Dickins 	 * and its shmem_writeback() needs them to be split when swapping.
13711e6decf3SHugh Dickins 	 */
1372f530ed0eSMatthew Wilcox (Oracle) 	if (folio_test_large(folio)) {
13731e6decf3SHugh Dickins 		/* Ensure the subpages are still dirty */
1374f530ed0eSMatthew Wilcox (Oracle) 		folio_test_set_dirty(folio);
13751e6decf3SHugh Dickins 		if (split_huge_page(page) < 0)
13761e6decf3SHugh Dickins 			goto redirty;
1377f530ed0eSMatthew Wilcox (Oracle) 		folio = page_folio(page);
1378f530ed0eSMatthew Wilcox (Oracle) 		folio_clear_dirty(folio);
13791e6decf3SHugh Dickins 	}
13801e6decf3SHugh Dickins 
1381f530ed0eSMatthew Wilcox (Oracle) 	index = folio->index;
13821635f6a7SHugh Dickins 
13831635f6a7SHugh Dickins 	/*
13841635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13851635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
1386f530ed0eSMatthew Wilcox (Oracle) 	 * fallocated folio arriving here is now to initialize it and write it.
13871aac1400SHugh Dickins 	 *
1388f530ed0eSMatthew Wilcox (Oracle) 	 * That's okay for a folio already fallocated earlier, but if we have
13891aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
1390f530ed0eSMatthew Wilcox (Oracle) 	 * of this folio in case we have to undo it, and (b) it may not be a
13911aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
1392f530ed0eSMatthew Wilcox (Oracle) 	 * reactivate the folio, and let shmem_fallocate() quit when too many.
13931635f6a7SHugh Dickins 	 */
1394f530ed0eSMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
13951aac1400SHugh Dickins 		if (inode->i_private) {
13961aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13971aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13981aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13991aac1400SHugh Dickins 			if (shmem_falloc &&
14008e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
14011aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
14021aac1400SHugh Dickins 			    index < shmem_falloc->next)
14031aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
14041aac1400SHugh Dickins 			else
14051aac1400SHugh Dickins 				shmem_falloc = NULL;
14061aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
14071aac1400SHugh Dickins 			if (shmem_falloc)
14081aac1400SHugh Dickins 				goto redirty;
14091aac1400SHugh Dickins 		}
1410f530ed0eSMatthew Wilcox (Oracle) 		folio_zero_range(folio, 0, folio_size(folio));
1411f530ed0eSMatthew Wilcox (Oracle) 		flush_dcache_folio(folio);
1412f530ed0eSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
14131635f6a7SHugh Dickins 	}
14141635f6a7SHugh Dickins 
1415e2e3fdc7SMatthew Wilcox (Oracle) 	swap = folio_alloc_swap(folio);
141648f170fbSHugh Dickins 	if (!swap.val)
141748f170fbSHugh Dickins 		goto redirty;
1418d9fe526aSHugh Dickins 
1419b1dea800SHugh Dickins 	/*
1420b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1421f530ed0eSMatthew Wilcox (Oracle) 	 * if it's not already there.  Do it now before the folio is
14226922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1423b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
14246922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
14256922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1426b1dea800SHugh Dickins 	 */
1427b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
142805bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1429b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1430b1dea800SHugh Dickins 
1431a4c366f0SMatthew Wilcox (Oracle) 	if (add_to_swap_cache(folio, swap,
14323852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
14333852f676SJoonsoo Kim 			NULL) == 0) {
14344595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1435267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1436267a4c76SHugh Dickins 		info->swapped++;
14374595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1438267a4c76SHugh Dickins 
1439aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14404cd400fdSMatthew Wilcox (Oracle) 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
14416922c0c7SHugh Dickins 
14426922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1443f530ed0eSMatthew Wilcox (Oracle) 		BUG_ON(folio_mapped(folio));
1444f530ed0eSMatthew Wilcox (Oracle) 		swap_writepage(&folio->page, wbc);
14451da177e4SLinus Torvalds 		return 0;
14461da177e4SLinus Torvalds 	}
14471da177e4SLinus Torvalds 
14486922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
14494081f744SMatthew Wilcox (Oracle) 	put_swap_folio(folio, swap);
14501da177e4SLinus Torvalds redirty:
1451f530ed0eSMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
1452d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1453f530ed0eSMatthew Wilcox (Oracle) 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1454f530ed0eSMatthew Wilcox (Oracle) 	folio_unlock(folio);
1455d9fe526aSHugh Dickins 	return 0;
14561da177e4SLinus Torvalds }
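
/*
 * Return protocol for shmem_writepage(), for reference: 0 means the
 * folio was either handed to swap_writepage() (now unlocked, under
 * writeback) or redirtied and unlocked; AOP_WRITEPAGE_ACTIVATE means
 * the folio is returned locked, so vmscan reactivates it rather than
 * retrying the write again soon.
 */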
14571da177e4SLinus Torvalds 
145875edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
145971fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1460680d794bSakpm@linux-foundation.org {
1461680d794bSakpm@linux-foundation.org 	char buffer[64];
1462680d794bSakpm@linux-foundation.org 
146371fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1464095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1465095f1fc4SLee Schermerhorn 
1466a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1467095f1fc4SLee Schermerhorn 
1468095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1469680d794bSakpm@linux-foundation.org }
147071fe804bSLee Schermerhorn 
147171fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
147271fe804bSLee Schermerhorn {
147371fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
147471fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
1475bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
147671fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
147771fe804bSLee Schermerhorn 		mpol_get(mpol);
1478bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
147971fe804bSLee Schermerhorn 	}
148071fe804bSLee Schermerhorn 	return mpol;
148171fe804bSLee Schermerhorn }
148275edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
148375edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
148475edd345SHugh Dickins {
148575edd345SHugh Dickins }
148675edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
148775edd345SHugh Dickins {
148875edd345SHugh Dickins 	return NULL;
148975edd345SHugh Dickins }
149075edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
149175edd345SHugh Dickins #ifndef CONFIG_NUMA
149275edd345SHugh Dickins #define vm_policy vm_private_data
149375edd345SHugh Dickins #endif
1494680d794bSakpm@linux-foundation.org 
1495800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1496800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1497800d8c63SKirill A. Shutemov {
1498800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14992c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1500800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1501800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1502800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1503800d8c63SKirill A. Shutemov }
1504800d8c63SKirill A. Shutemov 
1505800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1506800d8c63SKirill A. Shutemov {
1507800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1508800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1509800d8c63SKirill A. Shutemov }
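
/*
 * The pseudo-vma above is a stack-local stand-in: vma_init() gives it
 * no mm, the pgoff bias spreads interleaving across inodes, and the
 * policy comes from the inode's shared policy tree.  It exists only so
 * the mempolicy-aware allocators below (swap_cluster_readahead(),
 * vma_alloc_folio()) can be reused for an inode-backed mapping.
 */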
1510800d8c63SKirill A. Shutemov 
15115739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
151241ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
15131da177e4SLinus Torvalds {
15141da177e4SLinus Torvalds 	struct vm_area_struct pvma;
151518a2f371SMel Gorman 	struct page *page;
15168c63ca5bSWill Deacon 	struct vm_fault vmf = {
15178c63ca5bSWill Deacon 		.vma = &pvma,
15188c63ca5bSWill Deacon 	};
15191da177e4SLinus Torvalds 
1520800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1521e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1522800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
152318a2f371SMel Gorman 
15245739a81cSMatthew Wilcox (Oracle) 	if (!page)
15255739a81cSMatthew Wilcox (Oracle) 		return NULL;
15265739a81cSMatthew Wilcox (Oracle) 	return page_folio(page);
1527800d8c63SKirill A. Shutemov }
152818a2f371SMel Gorman 
152978cc8cdcSRik van Riel /*
153078cc8cdcSRik van Riel  * Make sure huge_gfp is always more limited than limit_gfp.
153178cc8cdcSRik van Riel  * Some of the flags set permissions, while others set limitations.
153278cc8cdcSRik van Riel  */
153378cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
153478cc8cdcSRik van Riel {
153578cc8cdcSRik van Riel 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
153678cc8cdcSRik van Riel 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1537187df5ddSRik van Riel 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1538187df5ddSRik van Riel 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1539187df5ddSRik van Riel 
1540187df5ddSRik van Riel 	/* Allow allocations only from the originally specified zones. */
1541187df5ddSRik van Riel 	result |= zoneflags;
154278cc8cdcSRik van Riel 
154378cc8cdcSRik van Riel 	/*
154478cc8cdcSRik van Riel 	 * Minimize the result gfp by taking the union with the deny flags,
154578cc8cdcSRik van Riel 	 * and the intersection of the allow flags.
154678cc8cdcSRik van Riel 	 */
154778cc8cdcSRik van Riel 	result |= (limit_gfp & denyflags);
154878cc8cdcSRik van Riel 	result |= (huge_gfp & limit_gfp) & allowflags;
154978cc8cdcSRik van Riel 
155078cc8cdcSRik van Riel 	return result;
155178cc8cdcSRik van Riel }
155278cc8cdcSRik van Riel 
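
/*
 * Worked example of the merge above: if huge_gfp carries __GFP_FS but
 * limit_gfp does not, the allowflags intersection drops __GFP_FS from
 * the result; if limit_gfp carries __GFP_NOWARN or __GFP_NORETRY, the
 * denyflags union adds it; and zone bits (GFP_ZONEMASK, e.g.
 * __GFP_DMA32) come from limit_gfp alone, never from huge_gfp.
 */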
155372827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1554800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1555800d8c63SKirill A. Shutemov {
1556800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15577b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15587b8d046fSMatthew Wilcox 	pgoff_t hindex;
1559dfe98499SMatthew Wilcox (Oracle) 	struct folio *folio;
1560800d8c63SKirill A. Shutemov 
15614620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15627b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15637b8d046fSMatthew Wilcox 								XA_PRESENT))
1564800d8c63SKirill A. Shutemov 		return NULL;
1565800d8c63SKirill A. Shutemov 
1566800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1567dfe98499SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1568800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1569dfe98499SMatthew Wilcox (Oracle) 	if (!folio)
1570dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
157172827e5cSMatthew Wilcox (Oracle) 	return folio;
157218a2f371SMel Gorman }
157318a2f371SMel Gorman 
15740c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp,
157518a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
157618a2f371SMel Gorman {
157718a2f371SMel Gorman 	struct vm_area_struct pvma;
15780c023ef5SMatthew Wilcox (Oracle) 	struct folio *folio;
157918a2f371SMel Gorman 
1580800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
15810c023ef5SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1582800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
158318a2f371SMel Gorman 
15840c023ef5SMatthew Wilcox (Oracle) 	return folio;
158518a2f371SMel Gorman }
158618a2f371SMel Gorman 
1587b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1588800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1589800d8c63SKirill A. Shutemov {
15900f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
159172827e5cSMatthew Wilcox (Oracle) 	struct folio *folio;
1592800d8c63SKirill A. Shutemov 	int nr;
1593800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1594800d8c63SKirill A. Shutemov 
1595396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1596800d8c63SKirill A. Shutemov 		huge = false;
1597800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1598800d8c63SKirill A. Shutemov 
15990f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1600800d8c63SKirill A. Shutemov 		goto failed;
1601800d8c63SKirill A. Shutemov 
1602800d8c63SKirill A. Shutemov 	if (huge)
160372827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_hugefolio(gfp, info, index);
1604800d8c63SKirill A. Shutemov 	else
160572827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_folio(gfp, info, index);
160672827e5cSMatthew Wilcox (Oracle) 	if (folio) {
160772827e5cSMatthew Wilcox (Oracle) 		__folio_set_locked(folio);
160872827e5cSMatthew Wilcox (Oracle) 		__folio_set_swapbacked(folio);
1609b1d0ec3aSMatthew Wilcox (Oracle) 		return folio;
161075edd345SHugh Dickins 	}
161118a2f371SMel Gorman 
1612800d8c63SKirill A. Shutemov 	err = -ENOMEM;
16130f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1614800d8c63SKirill A. Shutemov failed:
1615800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
16161da177e4SLinus Torvalds }
161771fe804bSLee Schermerhorn 
16181da177e4SLinus Torvalds /*
1619bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1620fc26babbSMatthew Wilcox (Oracle)  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1621bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1622bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1623bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1624bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1625bde05d1cSHugh Dickins  *
1626bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1627bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1628bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1629bde05d1cSHugh Dickins  */
1630069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1631bde05d1cSHugh Dickins {
1632069d849cSMatthew Wilcox (Oracle) 	return folio_zonenum(folio) > gfp_zone(gfp);
1633bde05d1cSHugh Dickins }
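
/*
 * Example: for the gma500 case above, where the mapping's gfp includes
 * __GFP_DMA32, gfp_zone(gfp) is ZONE_DMA32; a folio that swapin placed
 * in ZONE_NORMAL (a higher zone number) makes this return true, forcing
 * the copy in shmem_replace_folio() below.
 */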
1634bde05d1cSHugh Dickins 
16350d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1636bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1637bde05d1cSHugh Dickins {
1638d21bba2bSMatthew Wilcox (Oracle) 	struct folio *old, *new;
1639bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1640c1cb20d4SYu Zhao 	swp_entry_t entry;
1641bde05d1cSHugh Dickins 	pgoff_t swap_index;
1642bde05d1cSHugh Dickins 	int error;
1643bde05d1cSHugh Dickins 
16440d698e25SMatthew Wilcox (Oracle) 	old = *foliop;
1645*3d2c9087SDavid Hildenbrand 	entry = old->swap;
1646c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1647907ea17eSMatthew Wilcox (Oracle) 	swap_mapping = swap_address_space(entry);
1648bde05d1cSHugh Dickins 
1649bde05d1cSHugh Dickins 	/*
1650bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1651bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1652bde05d1cSHugh Dickins 	 */
1653bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1654907ea17eSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(old), old);
1655907ea17eSMatthew Wilcox (Oracle) 	new = shmem_alloc_folio(gfp, info, index);
1656907ea17eSMatthew Wilcox (Oracle) 	if (!new)
1657bde05d1cSHugh Dickins 		return -ENOMEM;
1658bde05d1cSHugh Dickins 
1659907ea17eSMatthew Wilcox (Oracle) 	folio_get(new);
1660907ea17eSMatthew Wilcox (Oracle) 	folio_copy(new, old);
1661907ea17eSMatthew Wilcox (Oracle) 	flush_dcache_folio(new);
1662bde05d1cSHugh Dickins 
1663907ea17eSMatthew Wilcox (Oracle) 	__folio_set_locked(new);
1664907ea17eSMatthew Wilcox (Oracle) 	__folio_set_swapbacked(new);
1665907ea17eSMatthew Wilcox (Oracle) 	folio_mark_uptodate(new);
1666*3d2c9087SDavid Hildenbrand 	new->swap = entry;
1667907ea17eSMatthew Wilcox (Oracle) 	folio_set_swapcache(new);
1668bde05d1cSHugh Dickins 
1669bde05d1cSHugh Dickins 	/*
1670bde05d1cSHugh Dickins 	 * Our caller will very soon move the new folio out of swapcache, but
1671bde05d1cSHugh Dickins 	 * it's a nice clean interface for us to replace old by new there.
1672bde05d1cSHugh Dickins 	 */
1673b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
1674907ea17eSMatthew Wilcox (Oracle) 	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
16750142ef6cSHugh Dickins 	if (!error) {
1676d21bba2bSMatthew Wilcox (Oracle) 		mem_cgroup_migrate(old, new);
1677907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1678907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1679907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1680907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
16810142ef6cSHugh Dickins 	}
1682b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1683bde05d1cSHugh Dickins 
16840142ef6cSHugh Dickins 	if (unlikely(error)) {
16850142ef6cSHugh Dickins 		/*
16860142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16870142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16880142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16890142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to new for clear and free.
1690907ea17eSMatthew Wilcox (Oracle) 		old = new;
16910142ef6cSHugh Dickins 	} else {
1692907ea17eSMatthew Wilcox (Oracle) 		folio_add_lru(new);
16930d698e25SMatthew Wilcox (Oracle) 		*foliop = new;
16940142ef6cSHugh Dickins 	}
1695bde05d1cSHugh Dickins 
1696907ea17eSMatthew Wilcox (Oracle) 	folio_clear_swapcache(old);
1697907ea17eSMatthew Wilcox (Oracle) 	old->private = NULL;
1698bde05d1cSHugh Dickins 
1699907ea17eSMatthew Wilcox (Oracle) 	folio_unlock(old);
1700907ea17eSMatthew Wilcox (Oracle) 	folio_put_refs(old, 2);
17010142ef6cSHugh Dickins 	return error;
1702bde05d1cSHugh Dickins }
1703bde05d1cSHugh Dickins 
17046cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
17056cec2b95SMiaohe Lin 					 struct folio *folio, swp_entry_t swap)
17066cec2b95SMiaohe Lin {
17076cec2b95SMiaohe Lin 	struct address_space *mapping = inode->i_mapping;
17086cec2b95SMiaohe Lin 	struct shmem_inode_info *info = SHMEM_I(inode);
17096cec2b95SMiaohe Lin 	swp_entry_t swapin_error;
17106cec2b95SMiaohe Lin 	void *old;
17116cec2b95SMiaohe Lin 
1712af19487fSAxel Rasmussen 	swapin_error = make_poisoned_swp_entry();
17136cec2b95SMiaohe Lin 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
17146cec2b95SMiaohe Lin 			     swp_to_radix_entry(swap),
17156cec2b95SMiaohe Lin 			     swp_to_radix_entry(swapin_error), 0);
17166cec2b95SMiaohe Lin 	if (old != swp_to_radix_entry(swap))
17176cec2b95SMiaohe Lin 		return;
17186cec2b95SMiaohe Lin 
17196cec2b95SMiaohe Lin 	folio_wait_writeback(folio);
172075fa68a5SMatthew Wilcox (Oracle) 	delete_from_swap_cache(folio);
17216cec2b95SMiaohe Lin 	spin_lock_irq(&info->lock);
17226cec2b95SMiaohe Lin 	/*
17236cec2b95SMiaohe Lin 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
17246cec2b95SMiaohe Lin 	 * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in
17256cec2b95SMiaohe Lin 	 * shmem_evict_inode.
17266cec2b95SMiaohe Lin 	 */
17276cec2b95SMiaohe Lin 	info->alloced--;
17286cec2b95SMiaohe Lin 	info->swapped--;
17296cec2b95SMiaohe Lin 	shmem_recalc_inode(inode);
17306cec2b95SMiaohe Lin 	spin_unlock_irq(&info->lock);
17316cec2b95SMiaohe Lin 	swap_free(swap);
17326cec2b95SMiaohe Lin }
17336cec2b95SMiaohe Lin 
1734bde05d1cSHugh Dickins /*
1735833de10fSMiaohe Lin  * Swap in the folio pointed to by *foliop.
1736833de10fSMiaohe Lin  * Caller has to make sure that *foliop contains a valid swapped folio.
1737833de10fSMiaohe Lin  * Returns 0 and the folio in *foliop on success. On failure, returns the
1738833de10fSMiaohe Lin  * error code and NULL in *foliop.
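 * Common error values, from the body below: -EEXIST if the swap entry
 * was raced away or replaced under us, -EIO for a poisoned (swapin
 * error) entry or a folio that failed to read, -ENOMEM if swapin
 * allocation fails, -EINVAL if the swap device has vanished while the
 * entry still matches.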
17391da177e4SLinus Torvalds  */
1740da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1741da08e9b7SMatthew Wilcox (Oracle) 			     struct folio **foliop, enum sgp_type sgp,
1742c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
17432b740303SSouptick Joarder 			     vm_fault_t *fault_type)
17441da177e4SLinus Torvalds {
17451da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
174623f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
174704f94e3fSDan Schatzberg 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1748cbc2bd98SKairui Song 	struct swap_info_struct *si;
1749da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
17501da177e4SLinus Torvalds 	swp_entry_t swap;
17511da177e4SLinus Torvalds 	int error;
17521da177e4SLinus Torvalds 
1753da08e9b7SMatthew Wilcox (Oracle) 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1754da08e9b7SMatthew Wilcox (Oracle) 	swap = radix_to_swp_entry(*foliop);
1755da08e9b7SMatthew Wilcox (Oracle) 	*foliop = NULL;
175654af6042SHugh Dickins 
1757af19487fSAxel Rasmussen 	if (is_poisoned_swp_entry(swap))
17586cec2b95SMiaohe Lin 		return -EIO;
17596cec2b95SMiaohe Lin 
1760cbc2bd98SKairui Song 	si = get_swap_device(swap);
1761cbc2bd98SKairui Song 	if (!si) {
1762cbc2bd98SKairui Song 		if (!shmem_confirm_swap(mapping, index, swap))
1763cbc2bd98SKairui Song 			return -EEXIST;
1764cbc2bd98SKairui Song 		else
1765cbc2bd98SKairui Song 			return -EINVAL;
1766cbc2bd98SKairui Song 	}
1767cbc2bd98SKairui Song 
17681da177e4SLinus Torvalds 	/* Look it up and read it in.. */
17695739a81cSMatthew Wilcox (Oracle) 	folio = swap_cache_get_folio(swap, NULL, 0);
17705739a81cSMatthew Wilcox (Oracle) 	if (!folio) {
17719e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17729e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
177368da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17749e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17752262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17769e18eb29SAndres Lagar-Cavilla 		}
17779e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
17785739a81cSMatthew Wilcox (Oracle) 		folio = shmem_swapin(swap, gfp, info, index);
17795739a81cSMatthew Wilcox (Oracle) 		if (!folio) {
17801da177e4SLinus Torvalds 			error = -ENOMEM;
178154af6042SHugh Dickins 			goto failed;
1782285b2c4fSHugh Dickins 		}
17831da177e4SLinus Torvalds 	}
17841da177e4SLinus Torvalds 
1785833de10fSMiaohe Lin 	/* We have to do this with folio locked to prevent races */
1786da08e9b7SMatthew Wilcox (Oracle) 	folio_lock(folio);
1787da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_swapcache(folio) ||
1788*3d2c9087SDavid Hildenbrand 	    folio->swap.val != swap.val ||
1789d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1790c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1791d1899228SHugh Dickins 		goto unlock;
1792bde05d1cSHugh Dickins 	}
1793da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
17941da177e4SLinus Torvalds 		error = -EIO;
179554af6042SHugh Dickins 		goto failed;
179654af6042SHugh Dickins 	}
1797da08e9b7SMatthew Wilcox (Oracle) 	folio_wait_writeback(folio);
179854af6042SHugh Dickins 
17998a84802eSSteven Price 	/*
18008a84802eSSteven Price 	 * Some architectures may have to restore extra metadata to the
1801da08e9b7SMatthew Wilcox (Oracle) 	 * folio after reading from swap.
18028a84802eSSteven Price 	 */
1803da08e9b7SMatthew Wilcox (Oracle) 	arch_swap_restore(swap, folio);
18048a84802eSSteven Price 
1805069d849cSMatthew Wilcox (Oracle) 	if (shmem_should_replace_folio(folio, gfp)) {
18060d698e25SMatthew Wilcox (Oracle) 		error = shmem_replace_folio(&folio, gfp, info, index);
1807bde05d1cSHugh Dickins 		if (error)
180854af6042SHugh Dickins 			goto failed;
18091da177e4SLinus Torvalds 	}
18101da177e4SLinus Torvalds 
1811b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, index,
18123fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
18133fea5a49SJohannes Weiner 					charge_mm);
181454af6042SHugh Dickins 	if (error)
181554af6042SHugh Dickins 		goto failed;
181654af6042SHugh Dickins 
18174595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
181854af6042SHugh Dickins 	info->swapped--;
181954af6042SHugh Dickins 	shmem_recalc_inode(inode);
18204595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
182127ab7006SHugh Dickins 
182266d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1823da08e9b7SMatthew Wilcox (Oracle) 		folio_mark_accessed(folio);
182466d2f4d2SHugh Dickins 
182575fa68a5SMatthew Wilcox (Oracle) 	delete_from_swap_cache(folio);
1826da08e9b7SMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
182727ab7006SHugh Dickins 	swap_free(swap);
1828cbc2bd98SKairui Song 	put_swap_device(si);
182927ab7006SHugh Dickins 
1830da08e9b7SMatthew Wilcox (Oracle) 	*foliop = folio;
1831c5bf121eSVineeth Remanan Pillai 	return 0;
1832c5bf121eSVineeth Remanan Pillai failed:
1833c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1834c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
18356cec2b95SMiaohe Lin 	if (error == -EIO)
18366cec2b95SMiaohe Lin 		shmem_set_folio_swapin_error(inode, index, folio, swap);
1837c5bf121eSVineeth Remanan Pillai unlock:
1838da08e9b7SMatthew Wilcox (Oracle) 	if (folio) {
1839da08e9b7SMatthew Wilcox (Oracle) 		folio_unlock(folio);
1840da08e9b7SMatthew Wilcox (Oracle) 		folio_put(folio);
1841c5bf121eSVineeth Remanan Pillai 	}
1842cbc2bd98SKairui Song 	put_swap_device(si);
1843c5bf121eSVineeth Remanan Pillai 
1844c5bf121eSVineeth Remanan Pillai 	return error;
1845c5bf121eSVineeth Remanan Pillai }
1846c5bf121eSVineeth Remanan Pillai 
1847c5bf121eSVineeth Remanan Pillai /*
1848fc26babbSMatthew Wilcox (Oracle)  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1849c5bf121eSVineeth Remanan Pillai  *
1850c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1851c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty, since we also free the swap
1852c5bf121eSVineeth Remanan Pillai  * entry: a page cannot live in both the swap and page cache.
1853c5bf121eSVineeth Remanan Pillai  *
1854c949b097SAxel Rasmussen  * vma, vmf, and fault_type are only supplied by shmem_fault:
1855c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1856c5bf121eSVineeth Remanan Pillai  */
1857fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1858fc26babbSMatthew Wilcox (Oracle) 		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1859c5bf121eSVineeth Remanan Pillai 		struct vm_area_struct *vma, struct vm_fault *vmf,
1860c5bf121eSVineeth Remanan Pillai 		vm_fault_t *fault_type)
1861c5bf121eSVineeth Remanan Pillai {
1862c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1863c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1864c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1865c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1866b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
18676fe7d712SLukas Bulwahn 	pgoff_t hindex;
1868164cc4feSRik van Riel 	gfp_t huge_gfp;
1869c5bf121eSVineeth Remanan Pillai 	int error;
1870c5bf121eSVineeth Remanan Pillai 	int once = 0;
1871c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1872c5bf121eSVineeth Remanan Pillai 
1873c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1874c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1875c5bf121eSVineeth Remanan Pillai repeat:
1876c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1877c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1878c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1879c5bf121eSVineeth Remanan Pillai 	}
1880c5bf121eSVineeth Remanan Pillai 
1881c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
188204f94e3fSDan Schatzberg 	charge_mm = vma ? vma->vm_mm : NULL;
1883c5bf121eSVineeth Remanan Pillai 
1884aaeb94ebSChristoph Hellwig 	folio = filemap_get_entry(mapping, index);
1885b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio && vma && userfaultfd_minor(vma)) {
1886aaeb94ebSChristoph Hellwig 		if (!xa_is_value(folio))
1887b1d0ec3aSMatthew Wilcox (Oracle) 			folio_put(folio);
1888c949b097SAxel Rasmussen 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1889c949b097SAxel Rasmussen 		return 0;
1890c949b097SAxel Rasmussen 	}
1891c949b097SAxel Rasmussen 
1892b1d0ec3aSMatthew Wilcox (Oracle) 	if (xa_is_value(folio)) {
1893da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, index, &folio,
1894c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1895c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1896c5bf121eSVineeth Remanan Pillai 			goto repeat;
1897c5bf121eSVineeth Remanan Pillai 
1898fc26babbSMatthew Wilcox (Oracle) 		*foliop = folio;
1899c5bf121eSVineeth Remanan Pillai 		return error;
1900c5bf121eSVineeth Remanan Pillai 	}
1901c5bf121eSVineeth Remanan Pillai 
1902b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
1903aaeb94ebSChristoph Hellwig 		folio_lock(folio);
1904aaeb94ebSChristoph Hellwig 
1905aaeb94ebSChristoph Hellwig 		/* Has the folio been truncated or swapped out? */
1906aaeb94ebSChristoph Hellwig 		if (unlikely(folio->mapping != mapping)) {
1907aaeb94ebSChristoph Hellwig 			folio_unlock(folio);
1908aaeb94ebSChristoph Hellwig 			folio_put(folio);
1909aaeb94ebSChristoph Hellwig 			goto repeat;
1910aaeb94ebSChristoph Hellwig 		}
1911acdd9f8eSHugh Dickins 		if (sgp == SGP_WRITE)
1912b1d0ec3aSMatthew Wilcox (Oracle) 			folio_mark_accessed(folio);
1913b1d0ec3aSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio))
1914acdd9f8eSHugh Dickins 			goto out;
1915fc26babbSMatthew Wilcox (Oracle) 		/* fallocated folio */
1916c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1917c5bf121eSVineeth Remanan Pillai 			goto clear;
1918b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
1919b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
1920c5bf121eSVineeth Remanan Pillai 	}
1921c5bf121eSVineeth Remanan Pillai 
1922c5bf121eSVineeth Remanan Pillai 	/*
1923fc26babbSMatthew Wilcox (Oracle) 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
1924fc26babbSMatthew Wilcox (Oracle) 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
1925acdd9f8eSHugh Dickins 	 */
1926fc26babbSMatthew Wilcox (Oracle) 	*foliop = NULL;
1927acdd9f8eSHugh Dickins 	if (sgp == SGP_READ)
1928acdd9f8eSHugh Dickins 		return 0;
1929acdd9f8eSHugh Dickins 	if (sgp == SGP_NOALLOC)
1930acdd9f8eSHugh Dickins 		return -ENOENT;
1931acdd9f8eSHugh Dickins 
1932acdd9f8eSHugh Dickins 	/*
1933acdd9f8eSHugh Dickins 	 * Fast cache lookup and swap lookup did not find it: allocate.
1934c5bf121eSVineeth Remanan Pillai 	 */
1935c5bf121eSVineeth Remanan Pillai 
1936cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1937cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1938cfda0526SMike Rapoport 		return 0;
1939cfda0526SMike Rapoport 	}
1940cfda0526SMike Rapoport 
19412cf13384SDavid Stevens 	if (!shmem_is_huge(inode, index, false,
19422cf13384SDavid Stevens 			   vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0))
1943800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
194427d80fa2SKees Cook 
1945164cc4feSRik van Riel 	huge_gfp = vma_thp_gfp_mask(vma);
194678cc8cdcSRik van Riel 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1947b1d0ec3aSMatthew Wilcox (Oracle) 	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1948b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1949c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1950b1d0ec3aSMatthew Wilcox (Oracle) 		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
195154af6042SHugh Dickins 	}
1952b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1953779750d2SKirill A. Shutemov 		int retry = 5;
1954c5bf121eSVineeth Remanan Pillai 
1955b1d0ec3aSMatthew Wilcox (Oracle) 		error = PTR_ERR(folio);
1956b1d0ec3aSMatthew Wilcox (Oracle) 		folio = NULL;
1957779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1958c5bf121eSVineeth Remanan Pillai 			goto unlock;
1959779750d2SKirill A. Shutemov 		/*
1960fc26babbSMatthew Wilcox (Oracle) 		 * Try to reclaim some space by splitting a large folio
1961779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1962779750d2SKirill A. Shutemov 		 */
1963779750d2SKirill A. Shutemov 		while (retry--) {
1964779750d2SKirill A. Shutemov 			int ret;
1965c5bf121eSVineeth Remanan Pillai 
1966779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1967779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1968779750d2SKirill A. Shutemov 				break;
1969779750d2SKirill A. Shutemov 			if (ret)
1970779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1971779750d2SKirill A. Shutemov 		}
1972c5bf121eSVineeth Remanan Pillai 		goto unlock;
1973800d8c63SKirill A. Shutemov 	}
1974800d8c63SKirill A. Shutemov 
1975b1d0ec3aSMatthew Wilcox (Oracle) 	hindex = round_down(index, folio_nr_pages(folio));
1976800d8c63SKirill A. Shutemov 
197766d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1978b1d0ec3aSMatthew Wilcox (Oracle) 		__folio_set_referenced(folio);
197966d2f4d2SHugh Dickins 
1980b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, hindex,
19813fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19823fea5a49SJohannes Weiner 					charge_mm);
19833fea5a49SJohannes Weiner 	if (error)
1984800d8c63SKirill A. Shutemov 		goto unacct;
1985b1d0ec3aSMatthew Wilcox (Oracle) 	folio_add_lru(folio);
198654af6042SHugh Dickins 
19874595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1988b1d0ec3aSMatthew Wilcox (Oracle) 	info->alloced += folio_nr_pages(folio);
1989fa020a2bSAndrew Morton 	inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
199054af6042SHugh Dickins 	shmem_recalc_inode(inode);
19914595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19921635f6a7SHugh Dickins 	alloced = true;
199354af6042SHugh Dickins 
1994b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_pmd_mappable(folio) &&
1995779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1996fc26babbSMatthew Wilcox (Oracle) 					folio_next_index(folio) - 1) {
1997779750d2SKirill A. Shutemov 		/*
1998fc26babbSMatthew Wilcox (Oracle) 		 * Part of the large folio is beyond i_size: subject
1999779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
2000779750d2SKirill A. Shutemov 		 */
2001779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
2002d041353dSCong Wang 		/*
2003d041353dSCong Wang 		 * list_empty_careful() to defend against unlocked access
2004d041353dSCong Wang 		 * to ->shrink_list in shmem_unused_huge_shrink()
2005d041353dSCong Wang 		 */
2006d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
2007779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
2008779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
2009779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
2010779750d2SKirill A. Shutemov 		}
2011779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
2012779750d2SKirill A. Shutemov 	}
2013779750d2SKirill A. Shutemov 
2014ec9516fbSHugh Dickins 	/*
2015fc26babbSMatthew Wilcox (Oracle) 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
20161635f6a7SHugh Dickins 	 */
20171635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
20181635f6a7SHugh Dickins 		sgp = SGP_WRITE;
20191635f6a7SHugh Dickins clear:
20201635f6a7SHugh Dickins 	/*
2021fc26babbSMatthew Wilcox (Oracle) 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2022fc26babbSMatthew Wilcox (Oracle) 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
20231635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
2024ec9516fbSHugh Dickins 	 */
2025b1d0ec3aSMatthew Wilcox (Oracle) 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2026b1d0ec3aSMatthew Wilcox (Oracle) 		long i, n = folio_nr_pages(folio);
2027800d8c63SKirill A. Shutemov 
2028b1d0ec3aSMatthew Wilcox (Oracle) 		for (i = 0; i < n; i++)
2029b1d0ec3aSMatthew Wilcox (Oracle) 			clear_highpage(folio_page(folio, i));
2030b1d0ec3aSMatthew Wilcox (Oracle) 		flush_dcache_folio(folio);
2031b1d0ec3aSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
2032ec9516fbSHugh Dickins 	}
2033bde05d1cSHugh Dickins 
203454af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
203575edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
203609cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2037267a4c76SHugh Dickins 		if (alloced) {
2038b1d0ec3aSMatthew Wilcox (Oracle) 			folio_clear_dirty(folio);
2039b1d0ec3aSMatthew Wilcox (Oracle) 			filemap_remove_folio(folio);
20404595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
2041267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
20424595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
2043267a4c76SHugh Dickins 		}
204454af6042SHugh Dickins 		error = -EINVAL;
2045267a4c76SHugh Dickins 		goto unlock;
2046ff36b801SShaohua Li 	}
204763ec1973SMatthew Wilcox (Oracle) out:
2048fc26babbSMatthew Wilcox (Oracle) 	*foliop = folio;
204954af6042SHugh Dickins 	return 0;
2050d00806b1SNick Piggin 
2051d0217ac0SNick Piggin 	/*
205254af6042SHugh Dickins 	 * Error recovery.
20531da177e4SLinus Torvalds 	 */
205454af6042SHugh Dickins unacct:
2055b1d0ec3aSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2056800d8c63SKirill A. Shutemov 
2057b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_large(folio)) {
2058b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2059b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
2060800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2061800d8c63SKirill A. Shutemov 	}
2062d1899228SHugh Dickins unlock:
2063b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
2064b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2065b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
206654af6042SHugh Dickins 	}
206754af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20684595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
206954af6042SHugh Dickins 		shmem_recalc_inode(inode);
20704595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20711da177e4SLinus Torvalds 		goto repeat;
2072d8dc74f2SAdrian Bunk 	}
20737f4446eeSMatthew Wilcox 	if (error == -EEXIST)
207454af6042SHugh Dickins 		goto repeat;
207554af6042SHugh Dickins 	return error;
20761da177e4SLinus Torvalds }
20771da177e4SLinus Torvalds 
20784e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
20794e1fc793SMatthew Wilcox (Oracle) 		enum sgp_type sgp)
20804e1fc793SMatthew Wilcox (Oracle) {
20814e1fc793SMatthew Wilcox (Oracle) 	return shmem_get_folio_gfp(inode, index, foliop, sgp,
20824e1fc793SMatthew Wilcox (Oracle) 			mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
20834e1fc793SMatthew Wilcox (Oracle) }
20844e1fc793SMatthew Wilcox (Oracle) 
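/*
 * A minimal sketch of how a caller typically drives shmem_get_folio();
 * illustrative only, not a verbatim caller from this file:
 *
 *	struct folio *folio = NULL;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *
 *	if (err)
 *		return err;
 *	if (folio) {
 *		... use the folio, returned locked with a reference held ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *
 * Note that *foliop may legitimately come back NULL (e.g. SGP_READ over
 * a hole), so callers must check it before use.
 */
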
208510d20bd2SLinus Torvalds /*
208610d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
208710d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
208810d20bd2SLinus Torvalds  * target.
208910d20bd2SLinus Torvalds  */
2090ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
209110d20bd2SLinus Torvalds {
209210d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20932055da97SIngo Molnar 	list_del_init(&wait->entry);
209410d20bd2SLinus Torvalds 	return ret;
209510d20bd2SLinus Torvalds }
209610d20bd2SLinus Torvalds 
209720acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20981da177e4SLinus Torvalds {
209911bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2100496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
21019e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
210268a54100SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
210320acce67SSouptick Joarder 	int err;
210420acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
21051da177e4SLinus Torvalds 
2106f00cdc6dSHugh Dickins 	/*
2107f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2108f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
21099608703eSJan Kara 	 * locks writers out with its hold on i_rwsem.  So refrain from
21108e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
21118e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
21128e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
21138e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
21148e205f77SHugh Dickins 	 *
21158e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
21168e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
21178e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
21188e205f77SHugh Dickins 	 *
21198e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
21209608703eSJan Kara 	 * standard mutex or completion: but we cannot take i_rwsem in fault,
21218e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2122f00cdc6dSHugh Dickins 	 */
2123f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2124f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2125f00cdc6dSHugh Dickins 
2126f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2127f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
21288e205f77SHugh Dickins 		if (shmem_falloc &&
21298e205f77SHugh Dickins 		    shmem_falloc->waitq &&
21308e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
21318e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
21328897c1b1SKirill A. Shutemov 			struct file *fpin;
21338e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
213410d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
21358e205f77SHugh Dickins 
21368e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
21378897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
21388897c1b1SKirill A. Shutemov 			if (fpin)
21398e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
21408e205f77SHugh Dickins 
21418e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
21428e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
21438e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
21448e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21458e205f77SHugh Dickins 			schedule();
21468e205f77SHugh Dickins 
21478e205f77SHugh Dickins 			/*
21488e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
21498e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
21508e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
21518e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
21528e205f77SHugh Dickins 			 * though i_lock is still needed lest we race with wake_up_all().
21538e205f77SHugh Dickins 			 */
21548e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
21558e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
21568e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21578897c1b1SKirill A. Shutemov 
21588897c1b1SKirill A. Shutemov 			if (fpin)
21598897c1b1SKirill A. Shutemov 				fput(fpin);
21608e205f77SHugh Dickins 			return ret;
2161f00cdc6dSHugh Dickins 		}
21628e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2163f00cdc6dSHugh Dickins 	}
2164f00cdc6dSHugh Dickins 
216568a54100SMatthew Wilcox (Oracle) 	err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2166cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
216720acce67SSouptick Joarder 	if (err)
216820acce67SSouptick Joarder 		return vmf_error(err);
216968a54100SMatthew Wilcox (Oracle) 	if (folio)
217068a54100SMatthew Wilcox (Oracle) 		vmf->page = folio_file_page(folio, vmf->pgoff);
217168da9f05SHugh Dickins 	return ret;
21721da177e4SLinus Torvalds }
21731da177e4SLinus Torvalds 
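/*
 * shmem_fault() is installed as the ->fault handler of shmem mappings
 * via shmem_vm_ops / shmem_anon_vm_ops (set in shmem_mmap() below); a
 * hedged sketch of the wiring, not the literal definition from this file:
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault		= shmem_fault,
 *		.map_pages	= filemap_map_pages,
 *	};
 */
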
2174c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2175c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2176c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2177c01d5b30SHugh Dickins {
2178c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2179c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2180c01d5b30SHugh Dickins 	unsigned long addr;
2181c01d5b30SHugh Dickins 	unsigned long offset;
2182c01d5b30SHugh Dickins 	unsigned long inflated_len;
2183c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2184c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2185c01d5b30SHugh Dickins 
2186c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2187c01d5b30SHugh Dickins 		return -ENOMEM;
2188c01d5b30SHugh Dickins 
2189c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2190c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2191c01d5b30SHugh Dickins 
2192396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2193c01d5b30SHugh Dickins 		return addr;
2194c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2195c01d5b30SHugh Dickins 		return addr;
2196c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2197c01d5b30SHugh Dickins 		return addr;
2198c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2199c01d5b30SHugh Dickins 		return addr;
2200c01d5b30SHugh Dickins 
2201c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2202c01d5b30SHugh Dickins 		return addr;
2203c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2204c01d5b30SHugh Dickins 		return addr;
2205c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2206c01d5b30SHugh Dickins 		return addr;
2207c01d5b30SHugh Dickins 	/*
2208c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2209c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
221099158997SKirill A. Shutemov 	 * But if the caller specified an address hint and we allocated the
221199158997SKirill A. Shutemov 	 * area there successfully, respect that as before.
2212c01d5b30SHugh Dickins 	 */
221399158997SKirill A. Shutemov 	if (uaddr == addr)
2214c01d5b30SHugh Dickins 		return addr;
2215c01d5b30SHugh Dickins 
2216c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2217c01d5b30SHugh Dickins 		struct super_block *sb;
2218c01d5b30SHugh Dickins 
2219c01d5b30SHugh Dickins 		if (file) {
2220c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2221c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2222c01d5b30SHugh Dickins 		} else {
2223c01d5b30SHugh Dickins 			/*
2224c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2225c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2226c01d5b30SHugh Dickins 			 */
2227c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2228c01d5b30SHugh Dickins 				return addr;
2229c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2230c01d5b30SHugh Dickins 		}
22313089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2232c01d5b30SHugh Dickins 			return addr;
2233c01d5b30SHugh Dickins 	}
2234c01d5b30SHugh Dickins 
2235c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2236c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2237c01d5b30SHugh Dickins 		return addr;
2238c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2239c01d5b30SHugh Dickins 		return addr;
2240c01d5b30SHugh Dickins 
2241c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2242c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2243c01d5b30SHugh Dickins 		return addr;
2244c01d5b30SHugh Dickins 	if (inflated_len < len)
2245c01d5b30SHugh Dickins 		return addr;
2246c01d5b30SHugh Dickins 
224799158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2248c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2249c01d5b30SHugh Dickins 		return addr;
2250c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2251c01d5b30SHugh Dickins 		return addr;
2252c01d5b30SHugh Dickins 
2253c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2254c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2255c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2256c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2257c01d5b30SHugh Dickins 
2258c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2259c01d5b30SHugh Dickins 		return addr;
2260c01d5b30SHugh Dickins 	return inflated_addr;
2261c01d5b30SHugh Dickins }
2262c01d5b30SHugh Dickins 
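/*
 * A worked example of the inflation arithmetic above, assuming
 * HPAGE_PMD_SIZE is 2MB (x86-64) and pgoff is 0, so offset == 0:
 * for len = 4MB we request inflated_len = 4MB + 2MB - 4KB, and then
 * shift the returned address up by at most 2MB - 4KB so that
 * (addr & (HPAGE_PMD_SIZE-1)) == offset, i.e. the start of the mapping
 * is PMD-aligned and eligible for huge page mappings.
 */
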
22631da177e4SLinus Torvalds #ifdef CONFIG_NUMA
226441ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22651da177e4SLinus Torvalds {
2266496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
226741ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22681da177e4SLinus Torvalds }
22691da177e4SLinus Torvalds 
2270d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2271d8dc74f2SAdrian Bunk 					  unsigned long addr)
22721da177e4SLinus Torvalds {
2273496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
227441ffe5d5SHugh Dickins 	pgoff_t index;
22751da177e4SLinus Torvalds 
227641ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
227741ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22781da177e4SLinus Torvalds }
22791da177e4SLinus Torvalds #endif
22801da177e4SLinus Torvalds 
2281d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
22821da177e4SLinus Torvalds {
2283496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22841da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22851da177e4SLinus Torvalds 	int retval = -ENOMEM;
22861da177e4SLinus Torvalds 
2287ea0dfeb4SHugh Dickins 	/*
2288ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2289ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2290ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2291ea0dfeb4SHugh Dickins 	 */
22921da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
2293d7c9e99aSAlexey Gladkov 		if (!user_shm_lock(inode->i_size, ucounts))
22941da177e4SLinus Torvalds 			goto out_nomem;
22951da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
229689e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22971da177e4SLinus Torvalds 	}
2298d7c9e99aSAlexey Gladkov 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2299d7c9e99aSAlexey Gladkov 		user_shm_unlock(inode->i_size, ucounts);
23001da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
230189e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
23021da177e4SLinus Torvalds 	}
23031da177e4SLinus Torvalds 	retval = 0;
230489e004eaSLee Schermerhorn 
23051da177e4SLinus Torvalds out_nomem:
23061da177e4SLinus Torvalds 	return retval;
23071da177e4SLinus Torvalds }
23081da177e4SLinus Torvalds 
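/*
 * shmem_lock() is the backend for SysV SHM_LOCK / SHM_UNLOCK; a hedged
 * sketch of the calling convention (the real callers live in ipc/shm.c):
 *
 *	shmem_lock(shp->shm_file, 1, ucounts);	- SHM_LOCK
 *	shmem_lock(shp->shm_file, 0, ucounts);	- SHM_UNLOCK
 *
 * Locking marks the mapping unevictable, keeping its pages out of swap.
 */
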
23099b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
23101da177e4SLinus Torvalds {
2311d09e8ca6SPasha Tatashin 	struct inode *inode = file_inode(file);
2312d09e8ca6SPasha Tatashin 	struct shmem_inode_info *info = SHMEM_I(inode);
231322247efdSPeter Xu 	int ret;
2314ab3948f5SJoel Fernandes (Google) 
231522247efdSPeter Xu 	ret = seal_check_future_write(info->seals, vma);
231622247efdSPeter Xu 	if (ret)
231722247efdSPeter Xu 		return ret;
2318ab3948f5SJoel Fernandes (Google) 
231951b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
23201c71222eSSuren Baghdasaryan 	vm_flags_set(vma, VM_MTE_ALLOWED);
232151b0bff2SCatalin Marinas 
23221da177e4SLinus Torvalds 	file_accessed(file);
2323d09e8ca6SPasha Tatashin 	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2324d09e8ca6SPasha Tatashin 	if (inode->i_nlink)
23251da177e4SLinus Torvalds 		vma->vm_ops = &shmem_vm_ops;
2326d09e8ca6SPasha Tatashin 	else
2327d09e8ca6SPasha Tatashin 		vma->vm_ops = &shmem_anon_vm_ops;
23281da177e4SLinus Torvalds 	return 0;
23291da177e4SLinus Torvalds }
23301da177e4SLinus Torvalds 
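/*
 * From userspace, the common route into shmem_mmap() is a memfd or a
 * tmpfs file; a minimal sketch, error handling omitted and the name and
 * size purely illustrative:
 *
 *	int fd = memfd_create("example", 0);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */
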
2331cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR
2332cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2333cb241339SHugh Dickins 
2334cb241339SHugh Dickins /*
2335cb241339SHugh Dickins  * chattr's fsflags are unrelated to extended attributes,
2336cb241339SHugh Dickins  * but tmpfs has chosen to enable them under the same config option.
2337cb241339SHugh Dickins  */
2338cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2339e408e695STheodore Ts'o {
2340cb241339SHugh Dickins 	unsigned int i_flags = 0;
2341cb241339SHugh Dickins 
2342cb241339SHugh Dickins 	if (fsflags & FS_NOATIME_FL)
2343cb241339SHugh Dickins 		i_flags |= S_NOATIME;
2344cb241339SHugh Dickins 	if (fsflags & FS_APPEND_FL)
2345cb241339SHugh Dickins 		i_flags |= S_APPEND;
2346cb241339SHugh Dickins 	if (fsflags & FS_IMMUTABLE_FL)
2347cb241339SHugh Dickins 		i_flags |= S_IMMUTABLE;
2348cb241339SHugh Dickins 	/*
2349cb241339SHugh Dickins 	 * But FS_NODUMP_FL does not require any action in i_flags.
2350cb241339SHugh Dickins 	 */
2351cb241339SHugh Dickins 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2352e408e695STheodore Ts'o }
2353cb241339SHugh Dickins #else
2354cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2355cb241339SHugh Dickins {
2356cb241339SHugh Dickins }
2357cb241339SHugh Dickins #define shmem_initxattrs NULL
2358cb241339SHugh Dickins #endif
2359e408e695STheodore Ts'o 
23607a80e5b8SGiuseppe Scrivano static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb,
23617a80e5b8SGiuseppe Scrivano 				     struct inode *dir, umode_t mode, dev_t dev,
23627a80e5b8SGiuseppe Scrivano 				     unsigned long flags)
23631da177e4SLinus Torvalds {
23641da177e4SLinus Torvalds 	struct inode *inode;
23651da177e4SLinus Torvalds 	struct shmem_inode_info *info;
23661da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2367e809d5f0SChris Down 	ino_t ino;
23681da177e4SLinus Torvalds 
2369e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
23701da177e4SLinus Torvalds 		return NULL;
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds 	inode = new_inode(sb);
23731da177e4SLinus Torvalds 	if (inode) {
2374e809d5f0SChris Down 		inode->i_ino = ino;
23757a80e5b8SGiuseppe Scrivano 		inode_init_owner(idmap, inode, dir, mode);
23761da177e4SLinus Torvalds 		inode->i_blocks = 0;
2377078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2378a251c17aSJason A. Donenfeld 		inode->i_generation = get_random_u32();
23791da177e4SLinus Torvalds 		info = SHMEM_I(inode);
23801da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
23811da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2382af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
238340e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23840b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2385f7cd16a5SXavier Roche 		info->i_crtime = inode->i_mtime;
2386e408e695STheodore Ts'o 		info->fsflags = (dir == NULL) ? 0 :
2387e408e695STheodore Ts'o 			SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2388cb241339SHugh Dickins 		if (info->fsflags)
2389cb241339SHugh Dickins 			shmem_set_inode_flags(inode, info->fsflags);
2390779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23911da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
23922c6efe9cSLuis Chamberlain 		if (sbinfo->noswap)
23932c6efe9cSLuis Chamberlain 			mapping_set_unevictable(inode->i_mapping);
239438f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
239572c04902SAl Viro 		cache_no_acl(inode);
2396ff36da69SMatthew Wilcox (Oracle) 		mapping_set_large_folios(inode->i_mapping);
23971da177e4SLinus Torvalds 
23981da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23991da177e4SLinus Torvalds 		default:
240039f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
24011da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
24021da177e4SLinus Torvalds 			break;
24031da177e4SLinus Torvalds 		case S_IFREG:
240414fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
24051da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
24061da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
240771fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
240871fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
24091da177e4SLinus Torvalds 			break;
24101da177e4SLinus Torvalds 		case S_IFDIR:
2411d8c76e6fSDave Hansen 			inc_nlink(inode);
24121da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
24131da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
24141da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
24151da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
24161da177e4SLinus Torvalds 			break;
24171da177e4SLinus Torvalds 		case S_IFLNK:
24181da177e4SLinus Torvalds 			/*
24191da177e4SLinus Torvalds 			 * Must not load anything into the rbtree:
24201da177e4SLinus Torvalds 			 * mpol_free_shared_policy() will not be called.
24211da177e4SLinus Torvalds 			 */
242271fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
24231da177e4SLinus Torvalds 			break;
24241da177e4SLinus Torvalds 		}
2425b45d71fbSJoel Fernandes (Google) 
2426b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
24275b04c689SPavel Emelyanov 	} else
24285b04c689SPavel Emelyanov 		shmem_free_inode(sb);
24291da177e4SLinus Torvalds 	return inode;
24301da177e4SLinus Torvalds }
24311da177e4SLinus Torvalds 
24323460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
243361c50040SAxel Rasmussen int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
24344c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
24354c27fe4cSMike Rapoport 			   unsigned long dst_addr,
24364c27fe4cSMike Rapoport 			   unsigned long src_addr,
2437d9712937SAxel Rasmussen 			   uffd_flags_t flags,
2438d7be6d7eSZhangPeng 			   struct folio **foliop)
24394c27fe4cSMike Rapoport {
24404c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
24414c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
24424c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
24434c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
24444c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
24454c27fe4cSMike Rapoport 	void *page_kaddr;
2446b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
24474c27fe4cSMike Rapoport 	int ret;
24483460f6e5SAxel Rasmussen 	pgoff_t max_off;
24494c27fe4cSMike Rapoport 
24507ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
24517ed9d238SAxel Rasmussen 		/*
24527ed9d238SAxel Rasmussen 		 * We may have got a folio, returned -ENOENT triggering a retry,
24537ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the folio, to
24547ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
24557ed9d238SAxel Rasmussen 		 */
2456d7be6d7eSZhangPeng 		if (unlikely(*foliop)) {
2457d7be6d7eSZhangPeng 			folio_put(*foliop);
2458d7be6d7eSZhangPeng 			*foliop = NULL;
24597ed9d238SAxel Rasmussen 		}
24607d64ae3aSAxel Rasmussen 		return -ENOMEM;
24617ed9d238SAxel Rasmussen 	}
24624c27fe4cSMike Rapoport 
2463d7be6d7eSZhangPeng 	if (!*foliop) {
24647d64ae3aSAxel Rasmussen 		ret = -ENOMEM;
24657a7256d5SMatthew Wilcox (Oracle) 		folio = shmem_alloc_folio(gfp, info, pgoff);
24667a7256d5SMatthew Wilcox (Oracle) 		if (!folio)
24670f079694SMike Rapoport 			goto out_unacct_blocks;
24684c27fe4cSMike Rapoport 
2469d9712937SAxel Rasmussen 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
24707a7256d5SMatthew Wilcox (Oracle) 			page_kaddr = kmap_local_folio(folio, 0);
24715dc21f0cSIra Weiny 			/*
24725dc21f0cSIra Weiny 			 * The read mmap_lock is held here.  Despite the
24735dc21f0cSIra Weiny 			 * mmap_lock being read-recursive, a deadlock is still
24745dc21f0cSIra Weiny 			 * possible if a writer has taken a lock.  For example:
24755dc21f0cSIra Weiny 			 *
24765dc21f0cSIra Weiny 			 * process A thread 1 takes read lock on own mmap_lock
24775dc21f0cSIra Weiny 			 * process A thread 2 calls mmap, blocks taking write lock
24785dc21f0cSIra Weiny 			 * process B thread 1 takes page fault, read lock on own mmap lock
24795dc21f0cSIra Weiny 			 * process B thread 2 calls mmap, blocks taking write lock
24805dc21f0cSIra Weiny 			 * process A thread 1 blocks taking read lock on process B
24815dc21f0cSIra Weiny 			 * process B thread 1 blocks taking read lock on process A
24825dc21f0cSIra Weiny 			 *
24835dc21f0cSIra Weiny 			 * Disable page faults to prevent potential deadlock
24845dc21f0cSIra Weiny 			 * and retry the copy outside the mmap_lock.
24855dc21f0cSIra Weiny 			 */
24865dc21f0cSIra Weiny 			pagefault_disable();
24878d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
24888d103963SMike Rapoport 					     (const void __user *)src_addr,
24894c27fe4cSMike Rapoport 					     PAGE_SIZE);
24905dc21f0cSIra Weiny 			pagefault_enable();
24917a7256d5SMatthew Wilcox (Oracle) 			kunmap_local(page_kaddr);
24924c27fe4cSMike Rapoport 
2493c1e8d7c6SMichel Lespinasse 			/* fall back to copy_from_user() outside mmap_lock */
24944c27fe4cSMike Rapoport 			if (unlikely(ret)) {
2495d7be6d7eSZhangPeng 				*foliop = folio;
24967d64ae3aSAxel Rasmussen 				ret = -ENOENT;
24974c27fe4cSMike Rapoport 				/* don't free the folio */
24987d64ae3aSAxel Rasmussen 				goto out_unacct_blocks;
24994c27fe4cSMike Rapoport 			}
250019b482c2SMuchun Song 
25017a7256d5SMatthew Wilcox (Oracle) 			flush_dcache_folio(folio);
25023460f6e5SAxel Rasmussen 		} else {		/* ZEROPAGE */
25037a7256d5SMatthew Wilcox (Oracle) 			clear_user_highpage(&folio->page, dst_addr);
25048d103963SMike Rapoport 		}
25054c27fe4cSMike Rapoport 	} else {
2506d7be6d7eSZhangPeng 		folio = *foliop;
25077a7256d5SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2508d7be6d7eSZhangPeng 		*foliop = NULL;
25094c27fe4cSMike Rapoport 	}
25104c27fe4cSMike Rapoport 
25117a7256d5SMatthew Wilcox (Oracle) 	VM_BUG_ON(folio_test_locked(folio));
25127a7256d5SMatthew Wilcox (Oracle) 	VM_BUG_ON(folio_test_swapbacked(folio));
25137a7256d5SMatthew Wilcox (Oracle) 	__folio_set_locked(folio);
25147a7256d5SMatthew Wilcox (Oracle) 	__folio_set_swapbacked(folio);
25157a7256d5SMatthew Wilcox (Oracle) 	__folio_mark_uptodate(folio);
25169cc90c66SAndrea Arcangeli 
2517e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2518e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
25193460f6e5SAxel Rasmussen 	if (unlikely(pgoff >= max_off))
2520e2a50c1fSAndrea Arcangeli 		goto out_release;
2521e2a50c1fSAndrea Arcangeli 
2522b7dd44a1SMatthew Wilcox (Oracle) 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
252361c50040SAxel Rasmussen 				      gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
25244c27fe4cSMike Rapoport 	if (ret)
25254c27fe4cSMike Rapoport 		goto out_release;
25264c27fe4cSMike Rapoport 
252761c50040SAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2528d9712937SAxel Rasmussen 				       &folio->page, true, flags);
25297d64ae3aSAxel Rasmussen 	if (ret)
25307d64ae3aSAxel Rasmussen 		goto out_delete_from_cache;
25314c27fe4cSMike Rapoport 
253294b7cc01SYang Shi 	spin_lock_irq(&info->lock);
25334c27fe4cSMike Rapoport 	info->alloced++;
25344c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
25354c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
253694b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
25374c27fe4cSMike Rapoport 
25387a7256d5SMatthew Wilcox (Oracle) 	folio_unlock(folio);
25397d64ae3aSAxel Rasmussen 	return 0;
25407d64ae3aSAxel Rasmussen out_delete_from_cache:
25417a7256d5SMatthew Wilcox (Oracle) 	filemap_remove_folio(folio);
25424c27fe4cSMike Rapoport out_release:
25437a7256d5SMatthew Wilcox (Oracle) 	folio_unlock(folio);
25447a7256d5SMatthew Wilcox (Oracle) 	folio_put(folio);
25454c27fe4cSMike Rapoport out_unacct_blocks:
25460f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
25477d64ae3aSAxel Rasmussen 	return ret;
25484c27fe4cSMike Rapoport }
25493460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
25508d103963SMike Rapoport 
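/*
 * shmem_mfill_atomic_pte() above is reached from the userfaultfd
 * UFFDIO_COPY / UFFDIO_ZEROPAGE paths; a hedged userspace sketch, with
 * uffd, dst_addr and src_buf as illustrative names:
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr,
 *		.src = (unsigned long)src_buf,
 *		.len = 4096,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * See Documentation/admin-guide/mm/userfaultfd.rst for the full protocol.
 */
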
25511da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
255292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
255369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds static int
2556800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
25579d6b0cd7SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
2558800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
25591da177e4SLinus Torvalds {
2560800d15a5SNick Piggin 	struct inode *inode = mapping->host;
256140e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
256209cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2563eff1f906SMatthew Wilcox (Oracle) 	struct folio *folio;
2564a7605426SYang Shi 	int ret = 0;
256540e041a2SDavid Herrmann 
25669608703eSJan Kara 	/* i_rwsem is held by caller */
2567ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2568ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2569ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
257040e041a2SDavid Herrmann 			return -EPERM;
257140e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
257240e041a2SDavid Herrmann 			return -EPERM;
257340e041a2SDavid Herrmann 	}
257440e041a2SDavid Herrmann 
2575eff1f906SMatthew Wilcox (Oracle) 	ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2576a7605426SYang Shi 
2577a7605426SYang Shi 	if (ret)
2578a7605426SYang Shi 		return ret;
2579a7605426SYang Shi 
2580eff1f906SMatthew Wilcox (Oracle) 	*pagep = folio_file_page(folio, index);
2581a7605426SYang Shi 	if (PageHWPoison(*pagep)) {
2582eff1f906SMatthew Wilcox (Oracle) 		folio_unlock(folio);
2583eff1f906SMatthew Wilcox (Oracle) 		folio_put(folio);
2584a7605426SYang Shi 		*pagep = NULL;
2585a7605426SYang Shi 		return -EIO;
2586a7605426SYang Shi 	}
2587a7605426SYang Shi 
2588a7605426SYang Shi 	return 0;
2589800d15a5SNick Piggin }
2590800d15a5SNick Piggin 
2591800d15a5SNick Piggin static int
2592800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2593800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2594800d15a5SNick Piggin 			struct page *page, void *fsdata)
2595800d15a5SNick Piggin {
259669bbb87bSMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
2597800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2598800d15a5SNick Piggin 
2599800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2600800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2601800d15a5SNick Piggin 
260269bbb87bSMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
260369bbb87bSMatthew Wilcox (Oracle) 		if (copied < folio_size(folio)) {
260469bbb87bSMatthew Wilcox (Oracle) 			size_t from = offset_in_folio(folio, pos);
260569bbb87bSMatthew Wilcox (Oracle) 			folio_zero_segments(folio, 0, from,
260669bbb87bSMatthew Wilcox (Oracle) 					from + copied, folio_size(folio));
2607800d8c63SKirill A. Shutemov 		}
260869bbb87bSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
2609800d8c63SKirill A. Shutemov 	}
261069bbb87bSMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
261169bbb87bSMatthew Wilcox (Oracle) 	folio_unlock(folio);
261269bbb87bSMatthew Wilcox (Oracle) 	folio_put(folio);
2613d3602444SHugh Dickins 
2614800d15a5SNick Piggin 	return copied;
26151da177e4SLinus Torvalds }
26161da177e4SLinus Torvalds 
26172ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
26181da177e4SLinus Torvalds {
26196e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
26206e58e79dSAl Viro 	struct inode *inode = file_inode(file);
26211da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
262241ffe5d5SHugh Dickins 	pgoff_t index;
262341ffe5d5SHugh Dickins 	unsigned long offset;
2624f7c1d074SGeert Uytterhoeven 	int error = 0;
2625cb66a7a1SAl Viro 	ssize_t retval = 0;
26266e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2627a0ee5ec5SHugh Dickins 
262809cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
262909cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
26301da177e4SLinus Torvalds 
26311da177e4SLinus Torvalds 	for (;;) {
26324601e2fcSMatthew Wilcox (Oracle) 		struct folio *folio = NULL;
26331da177e4SLinus Torvalds 		struct page *page = NULL;
263441ffe5d5SHugh Dickins 		pgoff_t end_index;
263541ffe5d5SHugh Dickins 		unsigned long nr, ret;
26361da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
26371da177e4SLinus Torvalds 
263809cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
26391da177e4SLinus Torvalds 		if (index > end_index)
26401da177e4SLinus Torvalds 			break;
26411da177e4SLinus Torvalds 		if (index == end_index) {
264209cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
26431da177e4SLinus Torvalds 			if (nr <= offset)
26441da177e4SLinus Torvalds 				break;
26451da177e4SLinus Torvalds 		}
26461da177e4SLinus Torvalds 
26474601e2fcSMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, index, &folio, SGP_READ);
26486e58e79dSAl Viro 		if (error) {
26496e58e79dSAl Viro 			if (error == -EINVAL)
26506e58e79dSAl Viro 				error = 0;
26511da177e4SLinus Torvalds 			break;
26521da177e4SLinus Torvalds 		}
26534601e2fcSMatthew Wilcox (Oracle) 		if (folio) {
26544601e2fcSMatthew Wilcox (Oracle) 			folio_unlock(folio);
2655a7605426SYang Shi 
26564601e2fcSMatthew Wilcox (Oracle) 			page = folio_file_page(folio, index);
2657a7605426SYang Shi 			if (PageHWPoison(page)) {
26584601e2fcSMatthew Wilcox (Oracle) 				folio_put(folio);
2659a7605426SYang Shi 				error = -EIO;
2660a7605426SYang Shi 				break;
2661a7605426SYang Shi 			}
266275edd345SHugh Dickins 		}
26631da177e4SLinus Torvalds 
26641da177e4SLinus Torvalds 		/*
26651da177e4SLinus Torvalds 		 * We must re-evaluate i_size afterwards, since reads (unlike
26669608703eSJan Kara 		 * writes) are called without i_rwsem protection against truncate
26671da177e4SLinus Torvalds 		 */
266809cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
26691da177e4SLinus Torvalds 		i_size = i_size_read(inode);
267009cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
26711da177e4SLinus Torvalds 		if (index == end_index) {
267209cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
26731da177e4SLinus Torvalds 			if (nr <= offset) {
26744601e2fcSMatthew Wilcox (Oracle) 				if (folio)
26754601e2fcSMatthew Wilcox (Oracle) 					folio_put(folio);
26761da177e4SLinus Torvalds 				break;
26771da177e4SLinus Torvalds 			}
26781da177e4SLinus Torvalds 		}
26791da177e4SLinus Torvalds 		nr -= offset;
26801da177e4SLinus Torvalds 
26814601e2fcSMatthew Wilcox (Oracle) 		if (folio) {
26821da177e4SLinus Torvalds 			/*
26831da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
26841da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
26851da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
26861da177e4SLinus Torvalds 			 */
26871da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
26881da177e4SLinus Torvalds 				flush_dcache_page(page);
26891da177e4SLinus Torvalds 			/*
26901da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
26911da177e4SLinus Torvalds 			 */
26921da177e4SLinus Torvalds 			if (!offset)
26934601e2fcSMatthew Wilcox (Oracle) 				folio_mark_accessed(folio);
26941da177e4SLinus Torvalds 			/*
26951da177e4SLinus Torvalds 			 * Ok, we have the page, and it's up-to-date, so
26961da177e4SLinus Torvalds 			 * now we can copy it to user space...
26971da177e4SLinus Torvalds 			 */
26982ba5bbedSAl Viro 			ret = copy_page_to_iter(page, offset, nr, to);
26994601e2fcSMatthew Wilcox (Oracle) 			folio_put(folio);
27001bdec44bSHugh Dickins 
2701fcb14cb1SAl Viro 		} else if (user_backed_iter(to)) {
27021bdec44bSHugh Dickins 			/*
27031bdec44bSHugh Dickins 			 * copy_to_user() tends to be so well optimized, and
27041bdec44bSHugh Dickins 			 * clear_user() so much less so, that it is noticeably
27051bdec44bSHugh Dickins 			 * faster to copy the zero page than to clear.
27061bdec44bSHugh Dickins 			 */
27071bdec44bSHugh Dickins 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
27081bdec44bSHugh Dickins 		} else {
27091bdec44bSHugh Dickins 			/*
27101bdec44bSHugh Dickins 			 * But submitting the same page twice in a row to
27111bdec44bSHugh Dickins 			 * splice() - or others? - can result in confusion:
27121bdec44bSHugh Dickins 			 * so don't attempt that optimization on pipes etc.
27131bdec44bSHugh Dickins 			 */
27141bdec44bSHugh Dickins 			ret = iov_iter_zero(nr, to);
27151bdec44bSHugh Dickins 		}
27161bdec44bSHugh Dickins 
27176e58e79dSAl Viro 		retval += ret;
27181da177e4SLinus Torvalds 		offset += ret;
271909cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
272009cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
27211da177e4SLinus Torvalds 
27222ba5bbedSAl Viro 		if (!iov_iter_count(to))
27231da177e4SLinus Torvalds 			break;
27246e58e79dSAl Viro 		if (ret < nr) {
27256e58e79dSAl Viro 			error = -EFAULT;
27266e58e79dSAl Viro 			break;
27276e58e79dSAl Viro 		}
27281da177e4SLinus Torvalds 		cond_resched();
27291da177e4SLinus Torvalds 	}
27301da177e4SLinus Torvalds 
273109cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
27326e58e79dSAl Viro 	file_accessed(file);
27336e58e79dSAl Viro 	return retval ? retval : error;
27341da177e4SLinus Torvalds }
27351da177e4SLinus Torvalds 
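/*
 * A consequence of the SGP_READ path above, seen from userspace: reading
 * a never-written range of a tmpfs file copies ZERO_PAGE (or zeroes the
 * iterator) instead of allocating folios, so, illustratively:
 *
 *	ftruncate(fd, 1 << 20);
 *	read(fd, buf, 4096);	- returns 4096 bytes of zeroes
 *
 * consumes no tmpfs memory for the hole.
 */
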
2736bd194b18SDavid Howells static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
2737bd194b18SDavid Howells 			      struct pipe_buffer *buf)
2738bd194b18SDavid Howells {
2739bd194b18SDavid Howells 	return true;
2740bd194b18SDavid Howells }
2741bd194b18SDavid Howells 
2742bd194b18SDavid Howells static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
2743bd194b18SDavid Howells 				  struct pipe_buffer *buf)
2744bd194b18SDavid Howells {
2745bd194b18SDavid Howells }
2746bd194b18SDavid Howells 
2747bd194b18SDavid Howells static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
2748bd194b18SDavid Howells 				    struct pipe_buffer *buf)
2749bd194b18SDavid Howells {
2750bd194b18SDavid Howells 	return false;
2751bd194b18SDavid Howells }
2752bd194b18SDavid Howells 
2753bd194b18SDavid Howells static const struct pipe_buf_operations zero_pipe_buf_ops = {
2754bd194b18SDavid Howells 	.release	= zero_pipe_buf_release,
2755bd194b18SDavid Howells 	.try_steal	= zero_pipe_buf_try_steal,
2756bd194b18SDavid Howells 	.get		= zero_pipe_buf_get,
2757bd194b18SDavid Howells };
2758bd194b18SDavid Howells 
2759bd194b18SDavid Howells static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
2760bd194b18SDavid Howells 					loff_t fpos, size_t size)
2761bd194b18SDavid Howells {
2762bd194b18SDavid Howells 	size_t offset = fpos & ~PAGE_MASK;
2763bd194b18SDavid Howells 
2764bd194b18SDavid Howells 	size = min_t(size_t, size, PAGE_SIZE - offset);
2765bd194b18SDavid Howells 
2766bd194b18SDavid Howells 	if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2767bd194b18SDavid Howells 		struct pipe_buffer *buf = pipe_head_buf(pipe);
2768bd194b18SDavid Howells 
2769bd194b18SDavid Howells 		*buf = (struct pipe_buffer) {
2770bd194b18SDavid Howells 			.ops	= &zero_pipe_buf_ops,
2771bd194b18SDavid Howells 			.page	= ZERO_PAGE(0),
2772bd194b18SDavid Howells 			.offset	= offset,
2773bd194b18SDavid Howells 			.len	= size,
2774bd194b18SDavid Howells 		};
2775bd194b18SDavid Howells 		pipe->head++;
2776bd194b18SDavid Howells 	}
2777bd194b18SDavid Howells 
2778bd194b18SDavid Howells 	return size;
2779bd194b18SDavid Howells }
2780bd194b18SDavid Howells 
2781bd194b18SDavid Howells static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
2782bd194b18SDavid Howells 				      struct pipe_inode_info *pipe,
2783bd194b18SDavid Howells 				      size_t len, unsigned int flags)
2784bd194b18SDavid Howells {
2785bd194b18SDavid Howells 	struct inode *inode = file_inode(in);
2786bd194b18SDavid Howells 	struct address_space *mapping = inode->i_mapping;
2787bd194b18SDavid Howells 	struct folio *folio = NULL;
2788bd194b18SDavid Howells 	size_t total_spliced = 0, used, npages, n, part;
2789bd194b18SDavid Howells 	loff_t isize;
2790bd194b18SDavid Howells 	int error = 0;
2791bd194b18SDavid Howells 
2792bd194b18SDavid Howells 	/* Work out how much data we can actually add into the pipe */
2793bd194b18SDavid Howells 	used = pipe_occupancy(pipe->head, pipe->tail);
2794bd194b18SDavid Howells 	npages = max_t(ssize_t, pipe->max_usage - used, 0);
2795bd194b18SDavid Howells 	len = min_t(size_t, len, npages * PAGE_SIZE);
2796bd194b18SDavid Howells 
2797bd194b18SDavid Howells 	do {
2798bd194b18SDavid Howells 		if (*ppos >= i_size_read(inode))
2799bd194b18SDavid Howells 			break;
2800bd194b18SDavid Howells 
2801fa598952SHugh Dickins 		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio,
2802fa598952SHugh Dickins 					SGP_READ);
2803bd194b18SDavid Howells 		if (error) {
2804bd194b18SDavid Howells 			if (error == -EINVAL)
2805bd194b18SDavid Howells 				error = 0;
2806bd194b18SDavid Howells 			break;
2807bd194b18SDavid Howells 		}
2808bd194b18SDavid Howells 		if (folio) {
2809bd194b18SDavid Howells 			folio_unlock(folio);
2810bd194b18SDavid Howells 
2811fa598952SHugh Dickins 			if (folio_test_hwpoison(folio) ||
2812fa598952SHugh Dickins 			    (folio_test_large(folio) &&
2813fa598952SHugh Dickins 			     folio_test_has_hwpoisoned(folio))) {
2814bd194b18SDavid Howells 				error = -EIO;
2815bd194b18SDavid Howells 				break;
2816bd194b18SDavid Howells 			}
2817bd194b18SDavid Howells 		}
2818bd194b18SDavid Howells 
2819bd194b18SDavid Howells 		/*
2820bd194b18SDavid Howells 		 * i_size must be checked after we know the folio is Uptodate.
2821bd194b18SDavid Howells 		 *
2822bd194b18SDavid Howells 		 * Checking i_size only afterwards allows us to calculate the
2823bd194b18SDavid Howells 		 * correct value for "part", which means the zero-filled part
2824bd194b18SDavid Howells 		 * of the folio is not copied back to userspace (unless another
2825bd194b18SDavid Howells 		 * truncate extends the file - this is desired though).
2826bd194b18SDavid Howells 		 */
2827bd194b18SDavid Howells 		isize = i_size_read(inode);
2828bd194b18SDavid Howells 		if (unlikely(*ppos >= isize))
2829bd194b18SDavid Howells 			break;
2830bd194b18SDavid Howells 		part = min_t(loff_t, isize - *ppos, len);
2831bd194b18SDavid Howells 
2832bd194b18SDavid Howells 		if (folio) {
2833bd194b18SDavid Howells 			/*
2834bd194b18SDavid Howells 			 * If users can be writing to this page using arbitrary
2835bd194b18SDavid Howells 			 * virtual addresses, take care about potential aliasing
2836bd194b18SDavid Howells 			 * before reading the page on the kernel side.
2837bd194b18SDavid Howells 			 */
2838bd194b18SDavid Howells 			if (mapping_writably_mapped(mapping))
2839bd194b18SDavid Howells 				flush_dcache_folio(folio);
2840bd194b18SDavid Howells 			folio_mark_accessed(folio);
2841bd194b18SDavid Howells 			/*
2842bd194b18SDavid Howells 			 * Ok, we have the page, and it's up-to-date, so we can
2843bd194b18SDavid Howells 			 * now splice it into the pipe.
2844bd194b18SDavid Howells 			 */
2845bd194b18SDavid Howells 			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
2846bd194b18SDavid Howells 			folio_put(folio);
2847bd194b18SDavid Howells 			folio = NULL;
2848bd194b18SDavid Howells 		} else {
2849fa598952SHugh Dickins 			n = splice_zeropage_into_pipe(pipe, *ppos, part);
2850bd194b18SDavid Howells 		}
2851bd194b18SDavid Howells 
2852bd194b18SDavid Howells 		if (!n)
2853bd194b18SDavid Howells 			break;
2854bd194b18SDavid Howells 		len -= n;
2855bd194b18SDavid Howells 		total_spliced += n;
2856bd194b18SDavid Howells 		*ppos += n;
2857bd194b18SDavid Howells 		in->f_ra.prev_pos = *ppos;
2858bd194b18SDavid Howells 		if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2859bd194b18SDavid Howells 			break;
2860bd194b18SDavid Howells 
2861bd194b18SDavid Howells 		cond_resched();
2862bd194b18SDavid Howells 	} while (len);
2863bd194b18SDavid Howells 
2864bd194b18SDavid Howells 	if (folio)
2865bd194b18SDavid Howells 		folio_put(folio);
2866bd194b18SDavid Howells 
2867bd194b18SDavid Howells 	file_accessed(in);
2868bd194b18SDavid Howells 	return total_spliced ? total_spliced : error;
2869bd194b18SDavid Howells }
2870bd194b18SDavid Howells 
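/*
 * shmem_file_splice_read() implements ->splice_read for tmpfs; a hedged
 * userspace sketch of what drives it:
 *
 *	loff_t off = 0;
 *	splice(tmpfs_fd, &off, pipe_wfd, NULL, 65536, 0);
 *
 * Holes are spliced as references to ZERO_PAGE via zero_pipe_buf_ops
 * above, so no folios are allocated for unwritten ranges.
 */
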
2871965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2872220f2ac9SHugh Dickins {
2873220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2874220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2875220f2ac9SHugh Dickins 
2876965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2877965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2878220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
287941139aa4SMatthew Wilcox (Oracle) 	if (offset < 0)
288041139aa4SMatthew Wilcox (Oracle) 		return -ENXIO;
288141139aa4SMatthew Wilcox (Oracle) 
28825955102cSAl Viro 	inode_lock(inode);
28839608703eSJan Kara 	/* We're holding i_rwsem so we can access i_size directly */
288441139aa4SMatthew Wilcox (Oracle) 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2885387aae6fSHugh Dickins 	if (offset >= 0)
288646a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
28875955102cSAl Viro 	inode_unlock(inode);
2888220f2ac9SHugh Dickins 	return offset;
2889220f2ac9SHugh Dickins }
2890220f2ac9SHugh Dickins 
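/*
 * SEEK_DATA / SEEK_HOLE on tmpfs is serviced by mapping_seek_hole_data();
 * an illustrative userspace probe for the first hole in a file:
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *
 * Since tmpfs has no backing store, the result roughly reflects which
 * pages are currently instantiated in the page cache or in swap.
 */
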
289183e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
289283e4fa9cSHugh Dickins 							 loff_t len)
289383e4fa9cSHugh Dickins {
2894496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2895e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
289640e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
28971aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2898d144bf62SHugh Dickins 	pgoff_t start, index, end, undo_fallocend;
2899e2d12e22SHugh Dickins 	int error;
290083e4fa9cSHugh Dickins 
290113ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
290213ace4d0SHugh Dickins 		return -EOPNOTSUPP;
290313ace4d0SHugh Dickins 
29045955102cSAl Viro 	inode_lock(inode);
290583e4fa9cSHugh Dickins 
290683e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
290783e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
290883e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
290983e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
29108e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
291183e4fa9cSHugh Dickins 
29129608703eSJan Kara 		/* protected by i_rwsem */
2913ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
291440e041a2SDavid Herrmann 			error = -EPERM;
291540e041a2SDavid Herrmann 			goto out;
291640e041a2SDavid Herrmann 		}
291740e041a2SDavid Herrmann 
29188e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2919aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2920f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2921f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2922f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2923f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2924f00cdc6dSHugh Dickins 
292583e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
292683e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
292783e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
292883e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
292983e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
29308e205f77SHugh Dickins 
29318e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
29328e205f77SHugh Dickins 		inode->i_private = NULL;
29338e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
29342055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
29358e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
293683e4fa9cSHugh Dickins 		error = 0;
29378e205f77SHugh Dickins 		goto out;
293883e4fa9cSHugh Dickins 	}
293983e4fa9cSHugh Dickins 
2940e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2941e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2942e2d12e22SHugh Dickins 	if (error)
2943e2d12e22SHugh Dickins 		goto out;
2944e2d12e22SHugh Dickins 
294540e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
294640e041a2SDavid Herrmann 		error = -EPERM;
294740e041a2SDavid Herrmann 		goto out;
294840e041a2SDavid Herrmann 	}
294940e041a2SDavid Herrmann 
295009cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
295109cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2952e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2953e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2954e2d12e22SHugh Dickins 		error = -ENOSPC;
2955e2d12e22SHugh Dickins 		goto out;
2956e2d12e22SHugh Dickins 	}
2957e2d12e22SHugh Dickins 
29588e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
29591aac1400SHugh Dickins 	shmem_falloc.start = start;
29601aac1400SHugh Dickins 	shmem_falloc.next  = start;
29611aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
29621aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
29631aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
29641aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
29651aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
29661aac1400SHugh Dickins 
2967d144bf62SHugh Dickins 	/*
2968d144bf62SHugh Dickins 	 * info->fallocend is only relevant when huge pages might be
2969d144bf62SHugh Dickins 	 * involved: to prevent split_huge_page() freeing fallocated
2970d144bf62SHugh Dickins 	 * pages when an FALLOC_FL_KEEP_SIZE allocation was committed beyond i_size.
2971d144bf62SHugh Dickins 	 */
2972d144bf62SHugh Dickins 	undo_fallocend = info->fallocend;
2973d144bf62SHugh Dickins 	if (info->fallocend < end)
2974d144bf62SHugh Dickins 		info->fallocend = end;
2975d144bf62SHugh Dickins 
2976050dcb5cSHugh Dickins 	for (index = start; index < end; ) {
2977b0802b22SMatthew Wilcox (Oracle) 		struct folio *folio;
2978e2d12e22SHugh Dickins 
2979e2d12e22SHugh Dickins 		/*
2980e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2981e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2982e2d12e22SHugh Dickins 		 */
2983e2d12e22SHugh Dickins 		if (signal_pending(current))
2984e2d12e22SHugh Dickins 			error = -EINTR;
29851aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
29861aac1400SHugh Dickins 			error = -ENOMEM;
2987e2d12e22SHugh Dickins 		else
2988b0802b22SMatthew Wilcox (Oracle) 			error = shmem_get_folio(inode, index, &folio,
2989b0802b22SMatthew Wilcox (Oracle) 						SGP_FALLOC);
2990e2d12e22SHugh Dickins 		if (error) {
2991d144bf62SHugh Dickins 			info->fallocend = undo_fallocend;
2992b0802b22SMatthew Wilcox (Oracle) 			/* Remove the !uptodate folios we added */
29937f556567SHugh Dickins 			if (index > start) {
29941635f6a7SHugh Dickins 				shmem_undo_range(inode,
299509cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2996b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
29977f556567SHugh Dickins 			}
29981aac1400SHugh Dickins 			goto undone;
2999e2d12e22SHugh Dickins 		}
3000e2d12e22SHugh Dickins 
3001050dcb5cSHugh Dickins 		/*
3002050dcb5cSHugh Dickins 		 * Here is a more important optimization than it appears:
3003b0802b22SMatthew Wilcox (Oracle) 		 * a second SGP_FALLOC on the same large folio will clear it,
3004b0802b22SMatthew Wilcox (Oracle) 		 * making it uptodate and un-undoable if we fail later.
3005050dcb5cSHugh Dickins 		 */
3006b0802b22SMatthew Wilcox (Oracle) 		index = folio_next_index(folio);
3007050dcb5cSHugh Dickins 		/* Beware 32-bit wraparound */
3008050dcb5cSHugh Dickins 		if (!index)
3009050dcb5cSHugh Dickins 			index--;
3010050dcb5cSHugh Dickins 
3011e2d12e22SHugh Dickins 		/*
30121aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
30131aac1400SHugh Dickins 		 * No need for lock or barrier: we have the folio lock.
30141aac1400SHugh Dickins 		 */
3015b0802b22SMatthew Wilcox (Oracle) 		if (!folio_test_uptodate(folio))
3016050dcb5cSHugh Dickins 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3017050dcb5cSHugh Dickins 		shmem_falloc.next = index;
30181aac1400SHugh Dickins 
30191aac1400SHugh Dickins 		/*
3020b0802b22SMatthew Wilcox (Oracle) 		 * If !uptodate, leave it that way so that freeable folios
30211635f6a7SHugh Dickins 		 * can be recognized if we need to roll back on error later.
3022b0802b22SMatthew Wilcox (Oracle) 		 * But mark it dirty so that memory pressure will swap rather
3023b0802b22SMatthew Wilcox (Oracle) 		 * than free the folios we are allocating (and SGP_CACHE folios
3024e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
3025e2d12e22SHugh Dickins 		 */
3026b0802b22SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
3027b0802b22SMatthew Wilcox (Oracle) 		folio_unlock(folio);
3028b0802b22SMatthew Wilcox (Oracle) 		folio_put(folio);
3029e2d12e22SHugh Dickins 		cond_resched();
3030e2d12e22SHugh Dickins 	}
3031e2d12e22SHugh Dickins 
3032e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3033e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
30341aac1400SHugh Dickins undone:
30351aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
30361aac1400SHugh Dickins 	inode->i_private = NULL;
30371aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
3038e2d12e22SHugh Dickins out:
303915f242bbSHugh Dickins 	if (!error)
304015f242bbSHugh Dickins 		file_modified(file);
30415955102cSAl Viro 	inode_unlock(inode);
304283e4fa9cSHugh Dickins 	return error;
304383e4fa9cSHugh Dickins }
304483e4fa9cSHugh Dickins 
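/*
 * The two shmem_fallocate() modes, from a userspace view (illustrative
 * offsets and lengths):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *	fallocate(fd, 0, off, len);	- preallocate, may extend i_size
 *
 * PUNCH_HOLE must be paired with KEEP_SIZE (checked before we get here),
 * and the preallocation path is what creates the !uptodate folios
 * discussed above.
 */
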
3045726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
30461da177e4SLinus Torvalds {
3047726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
30481da177e4SLinus Torvalds 
30491da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
305009cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
30511da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
30520edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
30531da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
305441ffe5d5SHugh Dickins 		buf->f_bavail =
305541ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
305641ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
30570edd73b3SHugh Dickins 	}
30580edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
30591da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
30601da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
30611da177e4SLinus Torvalds 	}
30621da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
306359cda49eSAmir Goldstein 
306459cda49eSAmir Goldstein 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
306559cda49eSAmir Goldstein 
30661da177e4SLinus Torvalds 	return 0;
30671da177e4SLinus Torvalds }
30681da177e4SLinus Torvalds 
30691da177e4SLinus Torvalds /*
30701da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
30711da177e4SLinus Torvalds  */
30721da177e4SLinus Torvalds static int
30735ebb29beSChristian Brauner shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3074549c7297SChristian Brauner 	    struct dentry *dentry, umode_t mode, dev_t dev)
30751da177e4SLinus Torvalds {
30760b0a0806SHugh Dickins 	struct inode *inode;
30771da177e4SLinus Torvalds 	int error = -ENOSPC;
30781da177e4SLinus Torvalds 
30797a80e5b8SGiuseppe Scrivano 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
30801da177e4SLinus Torvalds 	if (inode) {
3081feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
3082feda821eSChristoph Hellwig 		if (error)
3083feda821eSChristoph Hellwig 			goto out_iput;
30842a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
30859d8f13baSMimi Zohar 						     &dentry->d_name,
30866d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
3087feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
3088feda821eSChristoph Hellwig 			goto out_iput;
308937ec43cdSMimi Zohar 
3090718deb6bSAl Viro 		error = 0;
30911da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
3092078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
309336f05cabSJeff Layton 		inode_inc_iversion(dir);
30941da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
30951da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
30961da177e4SLinus Torvalds 	}
30971da177e4SLinus Torvalds 	return error;
3098feda821eSChristoph Hellwig out_iput:
3099feda821eSChristoph Hellwig 	iput(inode);
3100feda821eSChristoph Hellwig 	return error;
31011da177e4SLinus Torvalds }
31021da177e4SLinus Torvalds 
310360545d0dSAl Viro static int
3104011e2b71SChristian Brauner shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3105863f144fSMiklos Szeredi 	      struct file *file, umode_t mode)
310660545d0dSAl Viro {
310760545d0dSAl Viro 	struct inode *inode;
310860545d0dSAl Viro 	int error = -ENOSPC;
310960545d0dSAl Viro 
31107a80e5b8SGiuseppe Scrivano 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
311160545d0dSAl Viro 	if (inode) {
311260545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
311360545d0dSAl Viro 						     NULL,
311460545d0dSAl Viro 						     shmem_initxattrs, NULL);
3115feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
3116feda821eSChristoph Hellwig 			goto out_iput;
3117feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
3118feda821eSChristoph Hellwig 		if (error)
3119feda821eSChristoph Hellwig 			goto out_iput;
3120863f144fSMiklos Szeredi 		d_tmpfile(file, inode);
312160545d0dSAl Viro 	}
3122863f144fSMiklos Szeredi 	return finish_open_simple(file, error);
3123feda821eSChristoph Hellwig out_iput:
3124feda821eSChristoph Hellwig 	iput(inode);
3125feda821eSChristoph Hellwig 	return error;
312660545d0dSAl Viro }
312760545d0dSAl Viro 
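/*
 * Hedged userspace sketch: shmem_tmpfile() above is what backs O_TMPFILE
 * opens on tmpfs.  The file starts with zero links and vanishes on close
 * unless linked in later (see the linkat() sketch after shmem_link()).
 * The "/dev/shm" directory is an assumption.
 */
#define _GNU_SOURCE
#include <fcntl.h>

int make_unnamed_tmpfs_file(void)
{
	return open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
}
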
3128c54bd91eSChristian Brauner static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3129549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
31301da177e4SLinus Torvalds {
31311da177e4SLinus Torvalds 	int error;
31321da177e4SLinus Torvalds 
31337a80e5b8SGiuseppe Scrivano 	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
31347a80e5b8SGiuseppe Scrivano 	if (error)
31351da177e4SLinus Torvalds 		return error;
3136d8c76e6fSDave Hansen 	inc_nlink(dir);
31371da177e4SLinus Torvalds 	return 0;
31381da177e4SLinus Torvalds }
31391da177e4SLinus Torvalds 
31406c960e68SChristian Brauner static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3141549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
31421da177e4SLinus Torvalds {
31437a80e5b8SGiuseppe Scrivano 	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
31441da177e4SLinus Torvalds }
31451da177e4SLinus Torvalds 
31461da177e4SLinus Torvalds /*
31471da177e4SLinus Torvalds  * Link a file.
31481da177e4SLinus Torvalds  */
31491da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
31501da177e4SLinus Torvalds {
315175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
315229b00e60SDarrick J. Wong 	int ret = 0;
31531da177e4SLinus Torvalds 
31541da177e4SLinus Torvalds 	/*
31551da177e4SLinus Torvalds 	 * No ordinary (disk-based) filesystem counts links as inodes;
31561da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
31571da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
31581062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
31591062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
31601da177e4SLinus Torvalds 	 */
31611062af92SDarrick J. Wong 	if (inode->i_nlink) {
3162e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
31635b04c689SPavel Emelyanov 		if (ret)
31645b04c689SPavel Emelyanov 			goto out;
31651062af92SDarrick J. Wong 	}
31661da177e4SLinus Torvalds 
31671da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3168078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
316936f05cabSJeff Layton 	inode_inc_iversion(dir);
3170d8c76e6fSDave Hansen 	inc_nlink(inode);
31717de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
31721da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
31731da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31745b04c689SPavel Emelyanov out:
31755b04c689SPavel Emelyanov 	return ret;
31761da177e4SLinus Torvalds }
31771da177e4SLinus Torvalds 
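/*
 * Hedged userspace sketch: the i_nlink check in shmem_link() above skips
 * inode reservation for the first link of an O_TMPFILE file (nlink == 0).
 * One way to exercise that path; the helper name is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int give_tmpfile_a_name(int tmpfd, const char *newpath)
{
	char proc_path[64];

	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", tmpfd);
	/* works for files opened with O_TMPFILE but without O_EXCL */
	return linkat(AT_FDCWD, proc_path, AT_FDCWD, newpath,
		      AT_SYMLINK_FOLLOW);
}
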
31781da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
31791da177e4SLinus Torvalds {
318075c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
31811da177e4SLinus Torvalds 
31825b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
31835b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
31841da177e4SLinus Torvalds 
31851da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
3186078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
318736f05cabSJeff Layton 	inode_inc_iversion(dir);
31889a53c3a7SDave Hansen 	drop_nlink(inode);
31891da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
31901da177e4SLinus Torvalds 	return 0;
31911da177e4SLinus Torvalds }
31921da177e4SLinus Torvalds 
31931da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
31941da177e4SLinus Torvalds {
31951da177e4SLinus Torvalds 	if (!simple_empty(dentry))
31961da177e4SLinus Torvalds 		return -ENOTEMPTY;
31971da177e4SLinus Torvalds 
319875c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
31999a53c3a7SDave Hansen 	drop_nlink(dir);
32001da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
32011da177e4SLinus Torvalds }
32021da177e4SLinus Torvalds 
3203e18275aeSChristian Brauner static int shmem_whiteout(struct mnt_idmap *idmap,
3204549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
320546fdb794SMiklos Szeredi {
320646fdb794SMiklos Szeredi 	struct dentry *whiteout;
320746fdb794SMiklos Szeredi 	int error;
320846fdb794SMiklos Szeredi 
320946fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
321046fdb794SMiklos Szeredi 	if (!whiteout)
321146fdb794SMiklos Szeredi 		return -ENOMEM;
321246fdb794SMiklos Szeredi 
32137a80e5b8SGiuseppe Scrivano 	error = shmem_mknod(idmap, old_dir, whiteout,
321446fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
321546fdb794SMiklos Szeredi 	dput(whiteout);
321646fdb794SMiklos Szeredi 	if (error)
321746fdb794SMiklos Szeredi 		return error;
321846fdb794SMiklos Szeredi 
321946fdb794SMiklos Szeredi 	/*
322046fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
322146fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
322246fdb794SMiklos Szeredi 	 *
322346fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point;
322446fdb794SMiklos Szeredi 	 * which one it finds does not matter.
322546fdb794SMiklos Szeredi 	 */
322646fdb794SMiklos Szeredi 	d_rehash(whiteout);
322746fdb794SMiklos Szeredi 	return 0;
322846fdb794SMiklos Szeredi }
322946fdb794SMiklos Szeredi 
32301da177e4SLinus Torvalds /*
32311da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename;
32321da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
32331da177e4SLinus Torvalds  * it exists, so that the VFS layer correctly frees it when it
32341da177e4SLinus Torvalds  * gets overwritten.
32351da177e4SLinus Torvalds  */
3236e18275aeSChristian Brauner static int shmem_rename2(struct mnt_idmap *idmap,
3237549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
3238549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
3239549c7297SChristian Brauner 			 unsigned int flags)
32401da177e4SLinus Torvalds {
324175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
32421da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
32431da177e4SLinus Torvalds 
324446fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
32453b69ff51SMiklos Szeredi 		return -EINVAL;
32463b69ff51SMiklos Szeredi 
324737456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
32486429e463SLorenz Bauer 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
324937456771SMiklos Szeredi 
32501da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
32511da177e4SLinus Torvalds 		return -ENOTEMPTY;
32521da177e4SLinus Torvalds 
325346fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
325446fdb794SMiklos Szeredi 		int error;
325546fdb794SMiklos Szeredi 
32567a80e5b8SGiuseppe Scrivano 		error = shmem_whiteout(idmap, old_dir, old_dentry);
325746fdb794SMiklos Szeredi 		if (error)
325846fdb794SMiklos Szeredi 			return error;
325946fdb794SMiklos Szeredi 	}
326046fdb794SMiklos Szeredi 
326175c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
32621da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3263b928095bSMiklos Szeredi 		if (they_are_dirs) {
326475c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
32659a53c3a7SDave Hansen 			drop_nlink(old_dir);
3266b928095bSMiklos Szeredi 		}
32671da177e4SLinus Torvalds 	} else if (they_are_dirs) {
32689a53c3a7SDave Hansen 		drop_nlink(old_dir);
3269d8c76e6fSDave Hansen 		inc_nlink(new_dir);
32701da177e4SLinus Torvalds 	}
32711da177e4SLinus Torvalds 
32721da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
32731da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
32741da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
32751da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3276078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
327736f05cabSJeff Layton 	inode_inc_iversion(old_dir);
327836f05cabSJeff Layton 	inode_inc_iversion(new_dir);
32791da177e4SLinus Torvalds 	return 0;
32801da177e4SLinus Torvalds }
32811da177e4SLinus Torvalds 
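/*
 * Hedged userspace sketch of the rename flags shmem_rename2() accepts,
 * driven through renameat2(2) (glibc >= 2.28).  Paths are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int demo_tmpfs_renames(void)
{
	/* atomically swap two names: the RENAME_EXCHANGE branch above */
	if (renameat2(AT_FDCWD, "/tmp/a", AT_FDCWD, "/tmp/b",
		      RENAME_EXCHANGE) != 0)
		return -1;
	/* move and leave a whiteout behind: the RENAME_WHITEOUT branch */
	return renameat2(AT_FDCWD, "/tmp/a", AT_FDCWD, "/tmp/c",
			 RENAME_WHITEOUT);
}
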
32827a77db95SChristian Brauner static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3283549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
32841da177e4SLinus Torvalds {
32851da177e4SLinus Torvalds 	int error;
32861da177e4SLinus Torvalds 	int len;
32871da177e4SLinus Torvalds 	struct inode *inode;
32887ad0414bSMatthew Wilcox (Oracle) 	struct folio *folio;
32891da177e4SLinus Torvalds 
32901da177e4SLinus Torvalds 	len = strlen(symname) + 1;
329109cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
32921da177e4SLinus Torvalds 		return -ENAMETOOLONG;
32931da177e4SLinus Torvalds 
32947a80e5b8SGiuseppe Scrivano 	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
32950825a6f9SJoe Perches 				VM_NORESERVE);
32961da177e4SLinus Torvalds 	if (!inode)
32971da177e4SLinus Torvalds 		return -ENOSPC;
32981da177e4SLinus Torvalds 
32999d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
33006d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3301343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3302570bc1c2SStephen Smalley 		iput(inode);
3303570bc1c2SStephen Smalley 		return error;
3304570bc1c2SStephen Smalley 	}
3305570bc1c2SStephen Smalley 
33061da177e4SLinus Torvalds 	inode->i_size = len-1;
330769f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
33083ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
33093ed47db3SAl Viro 		if (!inode->i_link) {
331069f07ec9SHugh Dickins 			iput(inode);
331169f07ec9SHugh Dickins 			return -ENOMEM;
331269f07ec9SHugh Dickins 		}
331369f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
33141da177e4SLinus Torvalds 	} else {
3315e8ecde25SAl Viro 		inode_nohighmem(inode);
33167ad0414bSMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
33171da177e4SLinus Torvalds 		if (error) {
33181da177e4SLinus Torvalds 			iput(inode);
33191da177e4SLinus Torvalds 			return error;
33201da177e4SLinus Torvalds 		}
332114fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
33221da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
33237ad0414bSMatthew Wilcox (Oracle) 		memcpy(folio_address(folio), symname, len);
33247ad0414bSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
33257ad0414bSMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
33267ad0414bSMatthew Wilcox (Oracle) 		folio_unlock(folio);
33277ad0414bSMatthew Wilcox (Oracle) 		folio_put(folio);
33281da177e4SLinus Torvalds 	}
33291da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3330078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
333136f05cabSJeff Layton 	inode_inc_iversion(dir);
33321da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
33331da177e4SLinus Torvalds 	dget(dentry);
33341da177e4SLinus Torvalds 	return 0;
33351da177e4SLinus Torvalds }
33361da177e4SLinus Torvalds 
3337fceef393SAl Viro static void shmem_put_link(void *arg)
3338fceef393SAl Viro {
3339e4b57722SMatthew Wilcox (Oracle) 	folio_mark_accessed(arg);
3340e4b57722SMatthew Wilcox (Oracle) 	folio_put(arg);
3341fceef393SAl Viro }
3342fceef393SAl Viro 
33436b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3344fceef393SAl Viro 				  struct inode *inode,
3345fceef393SAl Viro 				  struct delayed_call *done)
33461da177e4SLinus Torvalds {
3347e4b57722SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
33486b255391SAl Viro 	int error;
3349e4b57722SMatthew Wilcox (Oracle) 
33506a6c9904SAl Viro 	if (!dentry) {
3351e4b57722SMatthew Wilcox (Oracle) 		folio = filemap_get_folio(inode->i_mapping, 0);
335266dabbb6SChristoph Hellwig 		if (IS_ERR(folio))
33536b255391SAl Viro 			return ERR_PTR(-ECHILD);
33547459c149SMatthew Wilcox (Oracle) 		if (PageHWPoison(folio_page(folio, 0)) ||
3355e4b57722SMatthew Wilcox (Oracle) 		    !folio_test_uptodate(folio)) {
3356e4b57722SMatthew Wilcox (Oracle) 			folio_put(folio);
33576a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
33586a6c9904SAl Viro 		}
33596a6c9904SAl Viro 	} else {
3360e4b57722SMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3361680baacbSAl Viro 		if (error)
3362680baacbSAl Viro 			return ERR_PTR(error);
3363e4b57722SMatthew Wilcox (Oracle) 		if (!folio)
3364a7605426SYang Shi 			return ERR_PTR(-ECHILD);
33657459c149SMatthew Wilcox (Oracle) 		if (PageHWPoison(folio_page(folio, 0))) {
3366e4b57722SMatthew Wilcox (Oracle) 			folio_unlock(folio);
3367e4b57722SMatthew Wilcox (Oracle) 			folio_put(folio);
3368a7605426SYang Shi 			return ERR_PTR(-ECHILD);
3369a7605426SYang Shi 		}
3370e4b57722SMatthew Wilcox (Oracle) 		folio_unlock(folio);
33711da177e4SLinus Torvalds 	}
3372e4b57722SMatthew Wilcox (Oracle) 	set_delayed_call(done, shmem_put_link, folio);
3373e4b57722SMatthew Wilcox (Oracle) 	return folio_address(folio);
33741da177e4SLinus Torvalds }
33751da177e4SLinus Torvalds 
3376b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3377e408e695STheodore Ts'o 
3378e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3379e408e695STheodore Ts'o {
3380e408e695STheodore Ts'o 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3381e408e695STheodore Ts'o 
3382e408e695STheodore Ts'o 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3383e408e695STheodore Ts'o 
3384e408e695STheodore Ts'o 	return 0;
3385e408e695STheodore Ts'o }
3386e408e695STheodore Ts'o 
33878782a9aeSChristian Brauner static int shmem_fileattr_set(struct mnt_idmap *idmap,
3388e408e695STheodore Ts'o 			      struct dentry *dentry, struct fileattr *fa)
3389e408e695STheodore Ts'o {
3390e408e695STheodore Ts'o 	struct inode *inode = d_inode(dentry);
3391e408e695STheodore Ts'o 	struct shmem_inode_info *info = SHMEM_I(inode);
3392e408e695STheodore Ts'o 
3393e408e695STheodore Ts'o 	if (fileattr_has_fsx(fa))
3394e408e695STheodore Ts'o 		return -EOPNOTSUPP;
3395cb241339SHugh Dickins 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3396cb241339SHugh Dickins 		return -EOPNOTSUPP;
3397e408e695STheodore Ts'o 
3398e408e695STheodore Ts'o 	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3399e408e695STheodore Ts'o 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
3400e408e695STheodore Ts'o 
3401cb241339SHugh Dickins 	shmem_set_inode_flags(inode, info->fsflags);
3402e408e695STheodore Ts'o 	inode->i_ctime = current_time(inode);
340336f05cabSJeff Layton 	inode_inc_iversion(inode);
3404e408e695STheodore Ts'o 	return 0;
3405e408e695STheodore Ts'o }
3406e408e695STheodore Ts'o 
3407b09e0fa4SEric Paris /*
3408b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3409b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3410b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3411b09e0fa4SEric Paris  * filesystem level, though.
3412b09e0fa4SEric Paris  */
3413b09e0fa4SEric Paris 
34146d9d88d0SJarkko Sakkinen /*
34156d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
34166d9d88d0SJarkko Sakkinen  */
34176d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
34186d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
34196d9d88d0SJarkko Sakkinen 			    void *fs_info)
34206d9d88d0SJarkko Sakkinen {
34216d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
34226d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
342338f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
34246d9d88d0SJarkko Sakkinen 	size_t len;
34256d9d88d0SJarkko Sakkinen 
34266d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
342738f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
34286d9d88d0SJarkko Sakkinen 		if (!new_xattr)
34296d9d88d0SJarkko Sakkinen 			return -ENOMEM;
34306d9d88d0SJarkko Sakkinen 
34316d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
34326d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
34336d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
34346d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
34353bef735aSChengguang Xu 			kvfree(new_xattr);
34366d9d88d0SJarkko Sakkinen 			return -ENOMEM;
34376d9d88d0SJarkko Sakkinen 		}
34386d9d88d0SJarkko Sakkinen 
34396d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
34406d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
34416d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
34426d9d88d0SJarkko Sakkinen 		       xattr->name, len);
34436d9d88d0SJarkko Sakkinen 
34443b4c7bc0SChristian Brauner 		simple_xattr_add(&info->xattrs, new_xattr);
34456d9d88d0SJarkko Sakkinen 	}
34466d9d88d0SJarkko Sakkinen 
34476d9d88d0SJarkko Sakkinen 	return 0;
34486d9d88d0SJarkko Sakkinen }
34496d9d88d0SJarkko Sakkinen 
3450aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3451b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3452b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3453aa7c5241SAndreas Gruenbacher {
3454b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3455aa7c5241SAndreas Gruenbacher 
3456aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3457aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3458aa7c5241SAndreas Gruenbacher }
3459aa7c5241SAndreas Gruenbacher 
3460aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
346139f60c1cSChristian Brauner 				   struct mnt_idmap *idmap,
346259301226SAl Viro 				   struct dentry *unused, struct inode *inode,
346359301226SAl Viro 				   const char *name, const void *value,
346459301226SAl Viro 				   size_t size, int flags)
3465aa7c5241SAndreas Gruenbacher {
346659301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
346736f05cabSJeff Layton 	int err;
3468aa7c5241SAndreas Gruenbacher 
3469aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
347036f05cabSJeff Layton 	err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
347136f05cabSJeff Layton 	if (!err) {
347236f05cabSJeff Layton 		inode->i_ctime = current_time(inode);
347336f05cabSJeff Layton 		inode_inc_iversion(inode);
347436f05cabSJeff Layton 	}
347536f05cabSJeff Layton 	return err;
3476aa7c5241SAndreas Gruenbacher }
3477aa7c5241SAndreas Gruenbacher 
3478aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3479aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3480aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3481aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3482aa7c5241SAndreas Gruenbacher };
3483aa7c5241SAndreas Gruenbacher 
3484aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3485aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3486aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3487aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3488aa7c5241SAndreas Gruenbacher };
3489aa7c5241SAndreas Gruenbacher 
3490b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3491aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3492aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3493b09e0fa4SEric Paris 	NULL
3494b09e0fa4SEric Paris };
3495b09e0fa4SEric Paris 
3496b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3497b09e0fa4SEric Paris {
349875c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3499786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3500b09e0fa4SEric Paris }
3501b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3502b09e0fa4SEric Paris 
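/*
 * Hedged userspace sketch: only "security.*" and "trusted.*" handlers are
 * registered above, so tmpfs at this point has no "user.*" xattrs.  The
 * path is an assumption and trusted.* access needs CAP_SYS_ADMIN.
 */
#include <string.h>
#include <sys/xattr.h>

int demo_tmpfs_xattr(const char *path)
{
	char buf[16];

	if (setxattr(path, "trusted.note", "hi", 2, 0) != 0)
		return -1;	/* e.g. EPERM without CAP_SYS_ADMIN */
	if (getxattr(path, "trusted.note", buf, sizeof(buf)) != 2)
		return -1;
	return memcmp(buf, "hi", 2);
}
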
350369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
3504f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
35056b255391SAl Viro 	.get_link	= simple_get_link,
3506b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3507b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3508b09e0fa4SEric Paris #endif
35091da177e4SLinus Torvalds };
35101da177e4SLinus Torvalds 
351192e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
3512f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
35136b255391SAl Viro 	.get_link	= shmem_get_link,
3514b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3515b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
351639f0247dSAndreas Gruenbacher #endif
3517b09e0fa4SEric Paris };
351839f0247dSAndreas Gruenbacher 
351991828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
352091828a40SDavid M. Grimes {
352191828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
352291828a40SDavid M. Grimes }
352391828a40SDavid M. Grimes 
352491828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
352591828a40SDavid M. Grimes {
352691828a40SDavid M. Grimes 	__u32 *fh = vfh;
352791828a40SDavid M. Grimes 	__u64 inum = fh[2];
352891828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
352991828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
353091828a40SDavid M. Grimes }
353191828a40SDavid M. Grimes 
353212ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
353312ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
353412ba780dSAmir Goldstein {
353512ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
353612ba780dSAmir Goldstein 
353712ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
353812ba780dSAmir Goldstein }
353912ba780dSAmir Goldstein 
3541480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3542480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
354391828a40SDavid M. Grimes {
354491828a40SDavid M. Grimes 	struct inode *inode;
3545480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
354635c2a7f4SHugh Dickins 	u64 inum;
354791828a40SDavid M. Grimes 
3548480b116cSChristoph Hellwig 	if (fh_len < 3)
3549480b116cSChristoph Hellwig 		return NULL;
3550480b116cSChristoph Hellwig 
355135c2a7f4SHugh Dickins 	inum = fid->raw[2];
355235c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
355335c2a7f4SHugh Dickins 
3554480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3555480b116cSChristoph Hellwig 			shmem_match, fid->raw);
355691828a40SDavid M. Grimes 	if (inode) {
355712ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
355891828a40SDavid M. Grimes 		iput(inode);
355991828a40SDavid M. Grimes 	}
356091828a40SDavid M. Grimes 
3561480b116cSChristoph Hellwig 	return dentry;
356291828a40SDavid M. Grimes }
356391828a40SDavid M. Grimes 
3564b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3565b0b0382bSAl Viro 				struct inode *parent)
356691828a40SDavid M. Grimes {
35675fe0c237SAneesh Kumar K.V 	if (*len < 3) {
35685fe0c237SAneesh Kumar K.V 		*len = 3;
356994e07a75SNamjae Jeon 		return FILEID_INVALID;
35705fe0c237SAneesh Kumar K.V 	}
357191828a40SDavid M. Grimes 
35721d3382cbSAl Viro 	if (inode_unhashed(inode)) {
357391828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
357491828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
357591828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
357691828a40SDavid M. Grimes 		 * to do it once
357791828a40SDavid M. Grimes 		 * to do it once.
357891828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
357991828a40SDavid M. Grimes 		spin_lock(&lock);
35801d3382cbSAl Viro 		if (inode_unhashed(inode))
358191828a40SDavid M. Grimes 			__insert_inode_hash(inode,
358291828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
358391828a40SDavid M. Grimes 		spin_unlock(&lock);
358491828a40SDavid M. Grimes 	}
358591828a40SDavid M. Grimes 
358691828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
358791828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
358891828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
358991828a40SDavid M. Grimes 
359091828a40SDavid M. Grimes 	*len = 3;
359191828a40SDavid M. Grimes 	return 1;
359291828a40SDavid M. Grimes }
359391828a40SDavid M. Grimes 
359439655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
359591828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
359691828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3597480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
359891828a40SDavid M. Grimes };
359991828a40SDavid M. Grimes 
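/*
 * Hedged userspace sketch: the handle shmem_encode_fh() emits is three
 * 32-bit words { i_generation, ino low 32 bits, ino high 32 bits }, and
 * name_to_handle_at(2) exposes it.  Layout shown for illustration only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int dump_tmpfs_handle(const char *path)
{
	struct file_handle *fh;
	unsigned int *words;
	int mount_id, ret = -1;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == 0) {
		words = (unsigned int *)fh->f_handle;
		printf("gen=%u ino=%llu\n", words[0],
		       ((unsigned long long)words[2] << 32) | words[1]);
		ret = 0;
	}
	free(fh);
	return ret;
}
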
3600626c3920SAl Viro enum shmem_param {
3601626c3920SAl Viro 	Opt_gid,
3602626c3920SAl Viro 	Opt_huge,
3603626c3920SAl Viro 	Opt_mode,
3604626c3920SAl Viro 	Opt_mpol,
3605626c3920SAl Viro 	Opt_nr_blocks,
3606626c3920SAl Viro 	Opt_nr_inodes,
3607626c3920SAl Viro 	Opt_size,
3608626c3920SAl Viro 	Opt_uid,
3609ea3271f7SChris Down 	Opt_inode32,
3610ea3271f7SChris Down 	Opt_inode64,
36112c6efe9cSLuis Chamberlain 	Opt_noswap,
3612626c3920SAl Viro };
36131da177e4SLinus Torvalds 
36145eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
36152710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
36162710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
36172710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
36182710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
36192710c957SAl Viro 	{}
36202710c957SAl Viro };
36212710c957SAl Viro 
3622d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3623626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
36242710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3625626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3626626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3627626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3628626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3629626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3630626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3631ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3632ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
36332c6efe9cSLuis Chamberlain 	fsparam_flag  ("noswap",	Opt_noswap),
3634626c3920SAl Viro 	{}
3635626c3920SAl Viro };
3636626c3920SAl Viro 
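/*
 * Hedged examples of the parameters above on the mount command line
 * (mount point and values are assumptions):
 *
 *   mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777 tmpfs /mnt
 *   mount -t tmpfs -o huge=within_size,inode64,noswap tmpfs /mnt
 *
 * "size", "nr_blocks" and "nr_inodes" go through memparse(), so k/m/g
 * suffixes work; "size" additionally accepts a percentage of RAM.
 */
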
3637f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3638626c3920SAl Viro {
3639f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3640626c3920SAl Viro 	struct fs_parse_result result;
3641e04dc423SAl Viro 	unsigned long long size;
3642626c3920SAl Viro 	char *rest;
3643626c3920SAl Viro 	int opt;
3644626c3920SAl Viro 
3645d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3646f3235626SDavid Howells 	if (opt < 0)
3647626c3920SAl Viro 		return opt;
3648626c3920SAl Viro 
3649626c3920SAl Viro 	switch (opt) {
3650626c3920SAl Viro 	case Opt_size:
3651626c3920SAl Viro 		size = memparse(param->string, &rest);
3652e04dc423SAl Viro 		if (*rest == '%') {
3653e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3654e04dc423SAl Viro 			size *= totalram_pages();
3655e04dc423SAl Viro 			do_div(size, 100);
3656e04dc423SAl Viro 			rest++;
3657e04dc423SAl Viro 		}
3658e04dc423SAl Viro 		if (*rest)
3659626c3920SAl Viro 			goto bad_value;
3660e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3661e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3662626c3920SAl Viro 		break;
3663626c3920SAl Viro 	case Opt_nr_blocks:
3664626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
36650c98c8e1SZhaoLong Wang 		if (*rest || ctx->blocks > S64_MAX)
3666626c3920SAl Viro 			goto bad_value;
3667e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3668626c3920SAl Viro 		break;
3669626c3920SAl Viro 	case Opt_nr_inodes:
3670626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3671e04dc423SAl Viro 		if (*rest)
3672626c3920SAl Viro 			goto bad_value;
3673e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3674626c3920SAl Viro 		break;
3675626c3920SAl Viro 	case Opt_mode:
3676626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3677626c3920SAl Viro 		break;
3678626c3920SAl Viro 	case Opt_uid:
3679626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3680e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3681626c3920SAl Viro 			goto bad_value;
3682626c3920SAl Viro 		break;
3683626c3920SAl Viro 	case Opt_gid:
3684626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3685e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3686626c3920SAl Viro 			goto bad_value;
3687626c3920SAl Viro 		break;
3688626c3920SAl Viro 	case Opt_huge:
3689626c3920SAl Viro 		ctx->huge = result.uint_32;
3690626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3691396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3692626c3920SAl Viro 		      has_transparent_hugepage()))
3693626c3920SAl Viro 			goto unsupported_parameter;
3694e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3695626c3920SAl Viro 		break;
3696626c3920SAl Viro 	case Opt_mpol:
3697626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3698e04dc423SAl Viro 			mpol_put(ctx->mpol);
3699e04dc423SAl Viro 			ctx->mpol = NULL;
3700626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3701626c3920SAl Viro 				goto bad_value;
3702626c3920SAl Viro 			break;
3703626c3920SAl Viro 		}
3704626c3920SAl Viro 		goto unsupported_parameter;
3705ea3271f7SChris Down 	case Opt_inode32:
3706ea3271f7SChris Down 		ctx->full_inums = false;
3707ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3708ea3271f7SChris Down 		break;
3709ea3271f7SChris Down 	case Opt_inode64:
3710ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3711ea3271f7SChris Down 			return invalfc(fc,
3712ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel\n");
3713ea3271f7SChris Down 		}
3714ea3271f7SChris Down 		ctx->full_inums = true;
3715ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3716ea3271f7SChris Down 		break;
37172c6efe9cSLuis Chamberlain 	case Opt_noswap:
371801106e14SChristian Brauner 		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
371901106e14SChristian Brauner 			return invalfc(fc,
372001106e14SChristian Brauner 				       "Turning off swap in unprivileged tmpfs mounts unsupported");
372101106e14SChristian Brauner 		}
37222c6efe9cSLuis Chamberlain 		ctx->noswap = true;
37232c6efe9cSLuis Chamberlain 		ctx->seen |= SHMEM_SEEN_NOSWAP;
37242c6efe9cSLuis Chamberlain 		break;
3725e04dc423SAl Viro 	}
3726e04dc423SAl Viro 	return 0;
3727e04dc423SAl Viro 
3728626c3920SAl Viro unsupported_parameter:
3729f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3730626c3920SAl Viro bad_value:
3731f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3732e04dc423SAl Viro }
3733e04dc423SAl Viro 
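/*
 * Worked example for the Opt_size percentage branch above, assuming
 * 4 KiB pages and totalram_pages() == 1048576 (4 GiB of RAM), for
 * "size=50%":
 *
 *   memparse() returns 50, leaving rest at '%'
 *   size <<= PAGE_SHIFT      ->  50 * 4096        = 204800
 *   size *= totalram_pages() ->  204800 * 1048576 = 214748364800
 *   do_div(size, 100)        ->  2147483648 bytes (2 GiB)
 *   ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE) = 524288 pages
 */
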
3734f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3735e04dc423SAl Viro {
3736f3235626SDavid Howells 	char *options = data;
3737f3235626SDavid Howells 
373833f37c64SAl Viro 	if (options) {
373933f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
374033f37c64SAl Viro 		if (err)
374133f37c64SAl Viro 			return err;
374233f37c64SAl Viro 	}
374333f37c64SAl Viro 
3744b00dc3adSHugh Dickins 	while (options != NULL) {
3745626c3920SAl Viro 		char *this_char = options;
3746b00dc3adSHugh Dickins 		for (;;) {
3747b00dc3adSHugh Dickins 			/*
3748b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3749b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3750b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3751b00dc3adSHugh Dickins 			 */
3752b00dc3adSHugh Dickins 			options = strchr(options, ',');
3753b00dc3adSHugh Dickins 			if (options == NULL)
3754b00dc3adSHugh Dickins 				break;
3755b00dc3adSHugh Dickins 			options++;
3756b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3757b00dc3adSHugh Dickins 				options[-1] = '\0';
3758b00dc3adSHugh Dickins 				break;
3759b00dc3adSHugh Dickins 			}
3760b00dc3adSHugh Dickins 		}
3761626c3920SAl Viro 		if (*this_char) {
3762626c3920SAl Viro 			char *value = strchr(this_char, '=');
3763f3235626SDavid Howells 			size_t len = 0;
3764626c3920SAl Viro 			int err;
3765626c3920SAl Viro 
3766626c3920SAl Viro 			if (value) {
3767626c3920SAl Viro 				*value++ = '\0';
3768f3235626SDavid Howells 				len = strlen(value);
37691da177e4SLinus Torvalds 			}
3770f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3771f3235626SDavid Howells 			if (err < 0)
3772f3235626SDavid Howells 				return err;
37731da177e4SLinus Torvalds 		}
3774626c3920SAl Viro 	}
37751da177e4SLinus Torvalds 	return 0;
37761da177e4SLinus Torvalds }
37771da177e4SLinus Torvalds 
3778f3235626SDavid Howells /*
3779f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3780f3235626SDavid Howells  *
3781f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3782f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3783f3235626SDavid Howells  * that case we have no record of how much is already in use.
3784f3235626SDavid Howells  */
3785f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
37861da177e4SLinus Torvalds {
3787f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3788f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
37890edd73b3SHugh Dickins 	unsigned long inodes;
3790bf11b9a8SSebastian Andrzej Siewior 	struct mempolicy *mpol = NULL;
3791f3235626SDavid Howells 	const char *err;
37920edd73b3SHugh Dickins 
3793bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock(&sbinfo->stat_lock);
37940edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
37950c98c8e1SZhaoLong Wang 
3796f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3797f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3798f3235626SDavid Howells 			err = "Cannot retroactively limit size";
37990edd73b3SHugh Dickins 			goto out;
38000b5071ddSAl Viro 		}
3801f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3802f3235626SDavid Howells 					   ctx->blocks) > 0) {
3803f3235626SDavid Howells 			err = "Too small a size for current use";
38040b5071ddSAl Viro 			goto out;
3805f3235626SDavid Howells 		}
3806f3235626SDavid Howells 	}
3807f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3808f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3809f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
38100b5071ddSAl Viro 			goto out;
38110b5071ddSAl Viro 		}
3812f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3813f3235626SDavid Howells 			err = "Too few inodes for current use";
3814f3235626SDavid Howells 			goto out;
3815f3235626SDavid Howells 		}
3816f3235626SDavid Howells 	}
38170edd73b3SHugh Dickins 
3818ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3819ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3820ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3821ea3271f7SChris Down 		goto out;
3822ea3271f7SChris Down 	}
38232c6efe9cSLuis Chamberlain 	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
38242c6efe9cSLuis Chamberlain 		err = "Cannot disable swap on remount";
38252c6efe9cSLuis Chamberlain 		goto out;
38262c6efe9cSLuis Chamberlain 	}
38272c6efe9cSLuis Chamberlain 	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
38282c6efe9cSLuis Chamberlain 		err = "Cannot enable swap on remount if it was disabled on first mount";
38292c6efe9cSLuis Chamberlain 		goto out;
38302c6efe9cSLuis Chamberlain 	}
3831ea3271f7SChris Down 
3832f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3833f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3834ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3835ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3836f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3837f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3838f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3839f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3840f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
38410b5071ddSAl Viro 	}
384271fe804bSLee Schermerhorn 
38435f00110fSGreg Thelen 	/*
38445f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
38455f00110fSGreg Thelen 	 */
3846f3235626SDavid Howells 	if (ctx->mpol) {
3847bf11b9a8SSebastian Andrzej Siewior 		mpol = sbinfo->mpol;
3848f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3849f3235626SDavid Howells 		ctx->mpol = NULL;
38505f00110fSGreg Thelen 	}
38512c6efe9cSLuis Chamberlain 
38522c6efe9cSLuis Chamberlain 	if (ctx->noswap)
38532c6efe9cSLuis Chamberlain 		sbinfo->noswap = true;
38542c6efe9cSLuis Chamberlain 
3855bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3856bf11b9a8SSebastian Andrzej Siewior 	mpol_put(mpol);
3857f3235626SDavid Howells 	return 0;
38580edd73b3SHugh Dickins out:
3859bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3860f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
38611da177e4SLinus Torvalds }
3862680d794bSakpm@linux-foundation.org 
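/*
 * Hedged remount examples for the rules above (mount point assumed):
 *
 *   mount -o remount,size=2G /mnt    # ok if current usage still fits
 *   mount -o remount,size=1M /mnt    # "Too small a size for current use"
 *                                    #   if more than 1M is already used
 *   mount -o remount,noswap /mnt     # "Cannot disable swap on remount"
 *
 * Imposing a limit on an instance first mounted unlimited fails with
 * "Cannot retroactively limit size".
 */
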
386334c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3864680d794bSakpm@linux-foundation.org {
386534c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3866283ebdeeSTu Jinjiang 	struct mempolicy *mpol;
3867680d794bSakpm@linux-foundation.org 
3868680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3869b91742d8SZhangPeng 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
3870680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3871680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
38720825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
387309208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
38748751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
38758751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
38768751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
38778751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
38788751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
38798751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3880ea3271f7SChris Down 
3881ea3271f7SChris Down 	/*
3882ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3883ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3884ea3271f7SChris Down 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
3885ea3271f7SChris Down 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3886ea3271f7SChris Down 	 *
3887ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3888ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3889ea3271f7SChris Down 	 * consideration.
3890ea3271f7SChris Down 	 *
3891ea3271f7SChris Down 	 * As such:
3892ea3271f7SChris Down 	 *
3893ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3894ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3895ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3896ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3897ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3898ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3899ea3271f7SChris Down 	 *
3900ea3271f7SChris Down 	 */
3901ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3902ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3903396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
39045a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
39055a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
39065a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
39075a6e75f8SKirill A. Shutemov #endif
3908283ebdeeSTu Jinjiang 	mpol = shmem_get_sbmpol(sbinfo);
3909283ebdeeSTu Jinjiang 	shmem_show_mpol(seq, mpol);
3910283ebdeeSTu Jinjiang 	mpol_put(mpol);
39112c6efe9cSLuis Chamberlain 	if (sbinfo->noswap)
39122c6efe9cSLuis Chamberlain 		seq_printf(seq, ",noswap");
3913680d794bSakpm@linux-foundation.org 	return 0;
3914680d794bSakpm@linux-foundation.org }
39159183df25SDavid Herrmann 
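/*
 * Hedged example of what shmem_show_options() renders in /proc/mounts
 * (generic flags like "rw,relatime" come from the VFS, not from here):
 *
 *   tmpfs /mnt tmpfs rw,relatime,size=1024k,nr_inodes=1024,inode64 0 0
 */
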
3916680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
39171da177e4SLinus Torvalds 
39181da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
39191da177e4SLinus Torvalds {
3920602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3921602586a8SHugh Dickins 
3922e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3923602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
392449cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3925602586a8SHugh Dickins 	kfree(sbinfo);
39261da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
39271da177e4SLinus Torvalds }
39281da177e4SLinus Torvalds 
3929f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
39301da177e4SLinus Torvalds {
3931f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
39321da177e4SLinus Torvalds 	struct inode *inode;
39330edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3934680d794bSakpm@linux-foundation.org 
3935680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3936425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3937680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3938680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3939680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3940680d794bSakpm@linux-foundation.org 
3941680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
39421da177e4SLinus Torvalds 
39430edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
39441da177e4SLinus Torvalds 	/*
39451da177e4SLinus Torvalds 	 * By default we only allow half of the physical RAM per
39461da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
39471da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
39481da177e4SLinus Torvalds 	 */
39491751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3950f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3951f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3952f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3953f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3954ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3955ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
39562c6efe9cSLuis Chamberlain 		sbinfo->noswap = ctx->noswap;
3957ca4e0519SAl Viro 	} else {
39581751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
39591da177e4SLinus Torvalds 	}
396091828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
396136f05cabSJeff Layton 	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
39620edd73b3SHugh Dickins #else
39631751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
39640edd73b3SHugh Dickins #endif
3965f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3966f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3967e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3968e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3969e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3970e809d5f0SChris Down 			goto failed;
3971e809d5f0SChris Down 	}
3972f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3973f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3974ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3975f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3976f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3977f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3978f3235626SDavid Howells 	ctx->mpol = NULL;
39791da177e4SLinus Torvalds 
3980bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock_init(&sbinfo->stat_lock);
3981908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3982602586a8SHugh Dickins 		goto failed;
3983779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3984779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
39851da177e4SLinus Torvalds 
3986285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
398709cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
398809cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
39891da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
39901da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3991cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3992b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
399339f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3994b09e0fa4SEric Paris #endif
3995b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
39961751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
399739f0247dSAndreas Gruenbacher #endif
39982b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
39990edd73b3SHugh Dickins 
40007a80e5b8SGiuseppe Scrivano 	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
40017a80e5b8SGiuseppe Scrivano 				VM_NORESERVE);
40021da177e4SLinus Torvalds 	if (!inode)
40031da177e4SLinus Torvalds 		goto failed;
4004680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
4005680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
4006318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
4007318ceed0SAl Viro 	if (!sb->s_root)
400848fde701SAl Viro 		goto failed;
40091da177e4SLinus Torvalds 	return 0;
40101da177e4SLinus Torvalds 
40111da177e4SLinus Torvalds failed:
40121da177e4SLinus Torvalds 	shmem_put_super(sb);
4013f2b346e4SMiaohe Lin 	return -ENOMEM;
40141da177e4SLinus Torvalds }
40151da177e4SLinus Torvalds 
4016f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
4017f3235626SDavid Howells {
4018f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
4019f3235626SDavid Howells }
4020f3235626SDavid Howells 
4021f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
4022f3235626SDavid Howells {
4023f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
4024f3235626SDavid Howells 
4025f3235626SDavid Howells 	if (ctx) {
4026f3235626SDavid Howells 		mpol_put(ctx->mpol);
4027f3235626SDavid Howells 		kfree(ctx);
4028f3235626SDavid Howells 	}
4029f3235626SDavid Howells }
4030f3235626SDavid Howells 
4031f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
4032f3235626SDavid Howells 	.free			= shmem_free_fc,
4033f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
4034f3235626SDavid Howells #ifdef CONFIG_TMPFS
4035f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
4036f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
4037f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
4038f3235626SDavid Howells #endif
4039f3235626SDavid Howells };
4040f3235626SDavid Howells 
4041fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
40421da177e4SLinus Torvalds 
40431da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
40441da177e4SLinus Torvalds {
404541ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
4046fd60b288SMuchun Song 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
404741ffe5d5SHugh Dickins 	if (!info)
40481da177e4SLinus Torvalds 		return NULL;
404941ffe5d5SHugh Dickins 	return &info->vfs_inode;
40501da177e4SLinus Torvalds }
40511da177e4SLinus Torvalds 
405274b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
4053fa0d7e3dSNick Piggin {
405484e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
40553ed47db3SAl Viro 		kfree(inode->i_link);
4056fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4057fa0d7e3dSNick Piggin }
4058fa0d7e3dSNick Piggin 
40591da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
40601da177e4SLinus Torvalds {
406109208d15SAl Viro 	if (S_ISREG(inode->i_mode))
40621da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
40631da177e4SLinus Torvalds }
40641da177e4SLinus Torvalds 
406541ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
40661da177e4SLinus Torvalds {
406741ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
406841ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
40691da177e4SLinus Torvalds }
40701da177e4SLinus Torvalds 
40719a8ec03eSweiping zhang static void shmem_init_inodecache(void)
40721da177e4SLinus Torvalds {
40731da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
40741da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
40755d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
40761da177e4SLinus Torvalds }
40771da177e4SLinus Torvalds 
407841ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
40791da177e4SLinus Torvalds {
40801a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
40811da177e4SLinus Torvalds }
40821da177e4SLinus Torvalds 
4083a7605426SYang Shi /* Keep the page in page cache instead of truncating it */
4084a7605426SYang Shi static int shmem_error_remove_page(struct address_space *mapping,
4085a7605426SYang Shi 				   struct page *page)
4086a7605426SYang Shi {
4087a7605426SYang Shi 	return 0;
4088a7605426SYang Shi }
4089a7605426SYang Shi 
409030e6a51dSHui Su const struct address_space_operations shmem_aops = {
40911da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
409246de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
40931da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4094800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
4095800d15a5SNick Piggin 	.write_end	= shmem_write_end,
40961da177e4SLinus Torvalds #endif
40971c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
409854184650SMatthew Wilcox (Oracle) 	.migrate_folio	= migrate_folio,
40991c93923cSAndrew Morton #endif
4100a7605426SYang Shi 	.error_remove_page = shmem_error_remove_page,
41011da177e4SLinus Torvalds };
410230e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
41031da177e4SLinus Torvalds 
410415ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
41051da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
4106a5454f95SThomas Weißschuh 	.open		= generic_file_open,
4107c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
41081da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4109220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
41102ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
41118174202bSAl Viro 	.write_iter	= generic_file_write_iter,
41121b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
4113bd194b18SDavid Howells 	.splice_read	= shmem_file_splice_read,
4114f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
411583e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
41161da177e4SLinus Torvalds #endif
41171da177e4SLinus Torvalds };
41181da177e4SLinus Torvalds 
411992e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
412044a30220SYu Zhao 	.getattr	= shmem_getattr,
412194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
4122b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4123b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
4124feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
4125e408e695STheodore Ts'o 	.fileattr_get	= shmem_fileattr_get,
4126e408e695STheodore Ts'o 	.fileattr_set	= shmem_fileattr_set,
4127b09e0fa4SEric Paris #endif
41281da177e4SLinus Torvalds };
41291da177e4SLinus Torvalds 
413092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
41311da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
4132f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
41331da177e4SLinus Torvalds 	.create		= shmem_create,
41341da177e4SLinus Torvalds 	.lookup		= simple_lookup,
41351da177e4SLinus Torvalds 	.link		= shmem_link,
41361da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
41371da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
41381da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
41391da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
41401da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
41412773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
414260545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
41431da177e4SLinus Torvalds #endif
4144b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4145b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
4146e408e695STheodore Ts'o 	.fileattr_get	= shmem_fileattr_get,
4147e408e695STheodore Ts'o 	.fileattr_set	= shmem_fileattr_set,
4148b09e0fa4SEric Paris #endif
414939f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
415094c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
4151feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
415239f0247dSAndreas Gruenbacher #endif
415339f0247dSAndreas Gruenbacher };
415439f0247dSAndreas Gruenbacher 
415592e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
4156f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
4157b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
4158b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
4159b09e0fa4SEric Paris #endif
416039f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
416194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
4162feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
416339f0247dSAndreas Gruenbacher #endif
41641da177e4SLinus Torvalds };
41651da177e4SLinus Torvalds 
4166759b9775SHugh Dickins static const struct super_operations shmem_ops = {
41671da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
416874b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
41691da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
41701da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
41711da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
4172680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
41731da177e4SLinus Torvalds #endif
41741f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
41751da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
41761da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
4177396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4178779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
4179779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
4180779750d2SKirill A. Shutemov #endif
41811da177e4SLinus Torvalds };
41821da177e4SLinus Torvalds 
4183f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
418454cb8821SNick Piggin 	.fault		= shmem_fault,
4185d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
41861da177e4SLinus Torvalds #ifdef CONFIG_NUMA
41871da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
41881da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
41891da177e4SLinus Torvalds #endif
41901da177e4SLinus Torvalds };
41911da177e4SLinus Torvalds 
4192d09e8ca6SPasha Tatashin static const struct vm_operations_struct shmem_anon_vm_ops = {
4193d09e8ca6SPasha Tatashin 	.fault		= shmem_fault,
4194d09e8ca6SPasha Tatashin 	.map_pages	= filemap_map_pages,
4195d09e8ca6SPasha Tatashin #ifdef CONFIG_NUMA
4196d09e8ca6SPasha Tatashin 	.set_policy     = shmem_set_policy,
4197d09e8ca6SPasha Tatashin 	.get_policy     = shmem_get_policy,
4198d09e8ca6SPasha Tatashin #endif
4199d09e8ca6SPasha Tatashin };
4200d09e8ca6SPasha Tatashin 
4201f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
42021da177e4SLinus Torvalds {
4203f3235626SDavid Howells 	struct shmem_options *ctx;
4204f3235626SDavid Howells 
4205f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4206f3235626SDavid Howells 	if (!ctx)
4207f3235626SDavid Howells 		return -ENOMEM;
4208f3235626SDavid Howells 
4209f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
4210f3235626SDavid Howells 	ctx->uid = current_fsuid();
4211f3235626SDavid Howells 	ctx->gid = current_fsgid();
4212f3235626SDavid Howells 
4213f3235626SDavid Howells 	fc->fs_private = ctx;
4214f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
4215f3235626SDavid Howells 	return 0;
42161da177e4SLinus Torvalds }
42171da177e4SLinus Torvalds 
421841ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
42191da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
42201da177e4SLinus Torvalds 	.name		= "tmpfs",
4221f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
4222f3235626SDavid Howells #ifdef CONFIG_TMPFS
4223d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
4224f3235626SDavid Howells #endif
42251da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
42267a80e5b8SGiuseppe Scrivano #ifdef CONFIG_SHMEM
42277a80e5b8SGiuseppe Scrivano 	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
42287a80e5b8SGiuseppe Scrivano #else
4229ff36da69SMatthew Wilcox (Oracle) 	.fs_flags	= FS_USERNS_MOUNT,
42307a80e5b8SGiuseppe Scrivano #endif
42311da177e4SLinus Torvalds };
42321da177e4SLinus Torvalds 
42339096bbe9SMiaohe Lin void __init shmem_init(void)
42341da177e4SLinus Torvalds {
42351da177e4SLinus Torvalds 	int error;
42361da177e4SLinus Torvalds 
42379a8ec03eSweiping zhang 	shmem_init_inodecache();
42381da177e4SLinus Torvalds 
423941ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
42401da177e4SLinus Torvalds 	if (error) {
42411170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
42421da177e4SLinus Torvalds 		goto out2;
42431da177e4SLinus Torvalds 	}
424495dc112aSGreg Kroah-Hartman 
4245ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
42461da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
42471da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
42481170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
42491da177e4SLinus Torvalds 		goto out1;
42501da177e4SLinus Torvalds 	}
42515a6e75f8SKirill A. Shutemov 
4252396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4253435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
42545a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
42555a6e75f8SKirill A. Shutemov 	else
42565e6e5a12SHugh Dickins 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
42575a6e75f8SKirill A. Shutemov #endif
42589096bbe9SMiaohe Lin 	return;
42591da177e4SLinus Torvalds 
42601da177e4SLinus Torvalds out1:
426141ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
42621da177e4SLinus Torvalds out2:
426341ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
42641da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
42651da177e4SLinus Torvalds }
4266853ac43aSMatt Mackall 
4267396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
42685a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
42695a6e75f8SKirill A. Shutemov 				  struct kobj_attribute *attr, char *buf)
42705a6e75f8SKirill A. Shutemov {
427126083eb6SColin Ian King 	static const int values[] = {
42725a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
42735a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
42745a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
42755a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
42765a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
42775a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
42785a6e75f8SKirill A. Shutemov 	};
427979d4d38aSJoe Perches 	int len = 0;
428079d4d38aSJoe Perches 	int i;
42815a6e75f8SKirill A. Shutemov 
428279d4d38aSJoe Perches 	for (i = 0; i < ARRAY_SIZE(values); i++) {
428379d4d38aSJoe Perches 		len += sysfs_emit_at(buf, len,
428479d4d38aSJoe Perches 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
428579d4d38aSJoe Perches 				     i ? " " : "",
42865a6e75f8SKirill A. Shutemov 				     shmem_format_huge(values[i]));
42875a6e75f8SKirill A. Shutemov 	}
428879d4d38aSJoe Perches 
428979d4d38aSJoe Perches 	len += sysfs_emit_at(buf, len, "\n");
429079d4d38aSJoe Perches 
429179d4d38aSJoe Perches 	return len;
42925a6e75f8SKirill A. Shutemov }
42935a6e75f8SKirill A. Shutemov 
42945a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
42955a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
42965a6e75f8SKirill A. Shutemov {
42975a6e75f8SKirill A. Shutemov 	char tmp[16];
42985a6e75f8SKirill A. Shutemov 	int huge;
42995a6e75f8SKirill A. Shutemov 
43005a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
43015a6e75f8SKirill A. Shutemov 		return -EINVAL;
43025a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
43035a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
43045a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
43055a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
43065a6e75f8SKirill A. Shutemov 
43075a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
43085a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
43095a6e75f8SKirill A. Shutemov 		return -EINVAL;
43105a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
43115a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
43125a6e75f8SKirill A. Shutemov 		return -EINVAL;
43135a6e75f8SKirill A. Shutemov 
43145a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
4315435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
43165a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
43175a6e75f8SKirill A. Shutemov 	return count;
43185a6e75f8SKirill A. Shutemov }
43195a6e75f8SKirill A. Shutemov 
43204bfa8adaSMiaohe Lin struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
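
/*
 * Illustrative sketch, compiled out: the attribute above surfaces as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled; reading it prints
 * the candidate values with the current one bracketed, and writing one
 * of those keywords updates shmem_huge as in shmem_enabled_store().
 * Minimal userspace C to read the knob (path as documented for THP):
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "always within_size advise [never] deny force" */
	fclose(f);
	return 0;
}
#endif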
4321396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
4322f3f0e1d2SKirill A. Shutemov 
4323853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4324853ac43aSMatt Mackall 
4325853ac43aSMatt Mackall /*
4326853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4327853ac43aSMatt Mackall  *
4328853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4329853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4330853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4331853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4332853ac43aSMatt Mackall  */
4333853ac43aSMatt Mackall 
433441ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4335853ac43aSMatt Mackall 	.name		= "tmpfs",
4336f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4337d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
433836ce9d76SRoberto Sassu 	.kill_sb	= ramfs_kill_sb,
43392b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4340853ac43aSMatt Mackall };
4341853ac43aSMatt Mackall 
43429096bbe9SMiaohe Lin void __init shmem_init(void)
4343853ac43aSMatt Mackall {
434441ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4345853ac43aSMatt Mackall 
434641ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4347853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4348853ac43aSMatt Mackall }
4349853ac43aSMatt Mackall 
435010a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
4351853ac43aSMatt Mackall {
4352853ac43aSMatt Mackall 	return 0;
4353853ac43aSMatt Mackall }
4354853ac43aSMatt Mackall 
4355d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
43563f96b79aSHugh Dickins {
43573f96b79aSHugh Dickins 	return 0;
43583f96b79aSHugh Dickins }
43593f96b79aSHugh Dickins 
436024513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
436124513264SHugh Dickins {
436224513264SHugh Dickins }
436324513264SHugh Dickins 
4364c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4365c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4366c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4367c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4368c01d5b30SHugh Dickins {
4369c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4370c01d5b30SHugh Dickins }
4371c01d5b30SHugh Dickins #endif
4372c01d5b30SHugh Dickins 
437341ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
437494c1e62dSHugh Dickins {
437541ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
437694c1e62dSHugh Dickins }
437794c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
437894c1e62dSHugh Dickins 
4379853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
4380d09e8ca6SPasha Tatashin #define shmem_anon_vm_ops			generic_file_vm_ops
43810b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
43827a80e5b8SGiuseppe Scrivano #define shmem_get_inode(idmap, sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
43830b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
43840b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4385853ac43aSMatt Mackall 
4386853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4387853ac43aSMatt Mackall 
4388853ac43aSMatt Mackall /* common code */
43891da177e4SLinus Torvalds 
4390703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4391c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
43921da177e4SLinus Torvalds {
43931da177e4SLinus Torvalds 	struct inode *inode;
439493dec2daSAl Viro 	struct file *res;
43951da177e4SLinus Torvalds 
4396703321b6SMatthew Auld 	if (IS_ERR(mnt))
4397703321b6SMatthew Auld 		return ERR_CAST(mnt);
43981da177e4SLinus Torvalds 
4399285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
44001da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
44011da177e4SLinus Torvalds 
44057a80e5b8SGiuseppe Scrivano 	if (is_idmapped_mnt(mnt))
44067a80e5b8SGiuseppe Scrivano 		return ERR_PTR(-EINVAL);
44077a80e5b8SGiuseppe Scrivano 
44021da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
44031da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
44077a80e5b8SGiuseppe Scrivano 
44087a80e5b8SGiuseppe Scrivano 	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
44097a80e5b8SGiuseppe Scrivano 				S_IFREG | S_IRWXUGO, 0, flags);
4410dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4411dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4412dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4413dac2d1f6SAl Viro 	}
4414c7277090SEric Paris 	inode->i_flags |= i_flags;
44151da177e4SLinus Torvalds 	inode->i_size = size;
44166d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
441726567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
441893dec2daSAl Viro 	if (!IS_ERR(res))
441993dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
44204b42af81SAl Viro 				&shmem_file_operations);
44216b4d0b27SAl Viro 	if (IS_ERR(res))
442293dec2daSAl Viro 		iput(inode);
44236b4d0b27SAl Viro 	return res;
44241da177e4SLinus Torvalds }
4425c7277090SEric Paris 
4426c7277090SEric Paris /**
4427c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4428c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4429c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4430e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4431e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4432c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4433c7277090SEric Paris  * @size: size to be set for the file
4434c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4435c7277090SEric Paris  */
4436c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4437c7277090SEric Paris {
4438703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4439c7277090SEric Paris }
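
/*
 * Illustrative sketch, compiled out: how an in-kernel caller (in the
 * style of big_key or SysV shm noted above) might use
 * shmem_kernel_file_setup(); names and error handling here are only
 * examples, not taken from those callers.
 */
#if 0
	struct file *file;

	file = shmem_kernel_file_setup("example", PAGE_SIZE, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* I/O via kernel_read()/kernel_write(); no LSM checks on the inode */
	fput(file);	/* unlinked file is freed when the last reference drops */
#endif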
4440c7277090SEric Paris 
4441c7277090SEric Paris /**
4442c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4443c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4444c7277090SEric Paris  * @size: size to be set for the file
4445c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4446c7277090SEric Paris  */
4447c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4448c7277090SEric Paris {
4449703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4450c7277090SEric Paris }
4451395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
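
/*
 * Illustrative sketch, compiled out: shmem_file_setup() as used by
 * callers such as memfd_create(); size 0 plus VM_NORESERVE defers
 * space accounting until pages are actually instantiated.
 */
#if 0
	struct file *file;

	file = shmem_file_setup("example-memfd", 0, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* the dentry is unlinked: the file vanishes on the final fput() */
	fput(file);
#endif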
44521da177e4SLinus Torvalds 
445346711810SRandy Dunlap /**
4454703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4455703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4456703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4457703321b6SMatthew Auld  * @size: size to be set for the file
4458703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4459703321b6SMatthew Auld  */
4460703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4461703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4462703321b6SMatthew Auld {
4463703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4464703321b6SMatthew Auld }
4465703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
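
/*
 * Illustrative sketch, compiled out: creating a file on a caller-owned
 * tmpfs mount, as drm/i915 does for its private "gemfs" mount.  Here
 * "my_mnt" and "obj_size" are placeholders; the mount is assumed to
 * have been set up elsewhere (e.g. by kern_mount() of tmpfs).
 */
#if 0
	struct file *file;

	file = shmem_file_setup_with_mnt(my_mnt, "drm-object", obj_size,
					 VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	fput(file);
#endif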
4466703321b6SMatthew Auld 
4467703321b6SMatthew Auld /**
44681da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
446945e55300SPeter Collingbourne  * @vma: the vma to be mmapped is prepared by do_mmap
44701da177e4SLinus Torvalds  */
44711da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
44721da177e4SLinus Torvalds {
44731da177e4SLinus Torvalds 	struct file *file;
44741da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
44751da177e4SLinus Torvalds 
447666fc1303SHugh Dickins 	/*
4477c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
447866fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
447966fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
448066fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
448166fc1303SHugh Dickins 	 */
4482703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
44831da177e4SLinus Torvalds 	if (IS_ERR(file))
44841da177e4SLinus Torvalds 		return PTR_ERR(file);
44851da177e4SLinus Torvalds 
44861da177e4SLinus Torvalds 	if (vma->vm_file)
44871da177e4SLinus Torvalds 		fput(vma->vm_file);
44881da177e4SLinus Torvalds 	vma->vm_file = file;
4489d09e8ca6SPasha Tatashin 	vma->vm_ops = &shmem_anon_vm_ops;
4490f3f0e1d2SKirill A. Shutemov 
44911da177e4SLinus Torvalds 	return 0;
44921da177e4SLinus Torvalds }
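
/*
 * Illustrative sketch, compiled out: from userspace, a shared anonymous
 * mapping is what ends up backed by shmem_zero_setup() above.
 */
#if 0
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 42;	/* visible to children after fork(), unlike MAP_PRIVATE */
	munmap(p, 4096);
	return 0;
}
#endif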
4493d9d90e5eSHugh Dickins 
4494d9d90e5eSHugh Dickins /**
4495f01b2b3eSMatthew Wilcox (Oracle)  * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4496f01b2b3eSMatthew Wilcox (Oracle)  * @mapping:	the folio's address_space
4497f01b2b3eSMatthew Wilcox (Oracle)  * @index:	the folio index
4498d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4499d9d90e5eSHugh Dickins  *
4500d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4501d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
45027e0a1265SMatthew Wilcox (Oracle)  * But read_cache_page_gfp() uses the ->read_folio() method: which does not
4503d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4504d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4505d9d90e5eSHugh Dickins  *
450668da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
450768da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4508d9d90e5eSHugh Dickins  */
4509f01b2b3eSMatthew Wilcox (Oracle) struct folio *shmem_read_folio_gfp(struct address_space *mapping,
4510d9d90e5eSHugh Dickins 		pgoff_t index, gfp_t gfp)
4511d9d90e5eSHugh Dickins {
451268da9f05SHugh Dickins #ifdef CONFIG_SHMEM
451368da9f05SHugh Dickins 	struct inode *inode = mapping->host;
4514a3a9c397SMatthew Wilcox (Oracle) 	struct folio *folio;
451568da9f05SHugh Dickins 	int error;
451668da9f05SHugh Dickins 
451730e6a51dSHui Su 	BUG_ON(!shmem_mapping(mapping));
4518a3a9c397SMatthew Wilcox (Oracle) 	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4519cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
452068da9f05SHugh Dickins 	if (error)
4521a7605426SYang Shi 		return ERR_PTR(error);
4522a7605426SYang Shi 
4523a3a9c397SMatthew Wilcox (Oracle) 	folio_unlock(folio);
4524f01b2b3eSMatthew Wilcox (Oracle) 	return folio;
4525f01b2b3eSMatthew Wilcox (Oracle) #else
4526f01b2b3eSMatthew Wilcox (Oracle) 	/*
4527f01b2b3eSMatthew Wilcox (Oracle) 	 * The tiny !SHMEM case uses ramfs without swap
4528f01b2b3eSMatthew Wilcox (Oracle) 	 */
4529f01b2b3eSMatthew Wilcox (Oracle) 	return mapping_read_folio_gfp(mapping, index, gfp);
4530f01b2b3eSMatthew Wilcox (Oracle) #endif
4531f01b2b3eSMatthew Wilcox (Oracle) }
4532f01b2b3eSMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
4533f01b2b3eSMatthew Wilcox (Oracle) 
4534f01b2b3eSMatthew Wilcox (Oracle) struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4535f01b2b3eSMatthew Wilcox (Oracle) 					 pgoff_t index, gfp_t gfp)
4536f01b2b3eSMatthew Wilcox (Oracle) {
4537f01b2b3eSMatthew Wilcox (Oracle) 	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
4538f01b2b3eSMatthew Wilcox (Oracle) 	struct page *page;
4539f01b2b3eSMatthew Wilcox (Oracle) 
4540f01b2b3eSMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
4541f01b2b3eSMatthew Wilcox (Oracle) 		return &folio->page;
4542f01b2b3eSMatthew Wilcox (Oracle) 
4543a3a9c397SMatthew Wilcox (Oracle) 	page = folio_file_page(folio, index);
4544a7605426SYang Shi 	if (PageHWPoison(page)) {
4545a3a9c397SMatthew Wilcox (Oracle) 		folio_put(folio);
4546a7605426SYang Shi 		return ERR_PTR(-EIO);
4547a7605426SYang Shi 	}
4548a7605426SYang Shi 
454968da9f05SHugh Dickins 	return page;
4550d9d90e5eSHugh Dickins }
4551d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
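
/*
 * Illustrative sketch, compiled out: pulling one page of a tmpfs-backed
 * object, in the style of the i915/ttm usage described above; "mapping"
 * and "index" are assumed to come from a shmem file the caller owns
 * (e.g. file_inode(file)->i_mapping).
 */
#if 0
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping) |
					   __GFP_NORETRY | __GFP_NOWARN);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... access the page contents ... */
	put_page(page);
#endif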
4552