/* xref: /openbmc/linux/mm/shmem.c (revision fcb14cb1bdacec5b4374fe161e83fb8208164a85) */

/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

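/*
 * For example, with 4 KiB pages BLOCKS_PER_PAGE is 8, matching the
 * 512-byte units of inode->i_blocks, and VM_ACCT(5000) rounds 5000
 * bytes up to 2 pages: all of the accounting below is done in whole
 * pages.
 */
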
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

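/*
 * Roughly how mount options map onto this struct (see shmem_parse_one(),
 * later in this file): with 4 KiB pages, "size=1G" yields blocks == 262144
 * and sets SHMEM_SEEN_BLOCKS in seen; "nr_inodes=10240" fills inodes and
 * sets SHMEM_SEEN_INODES. The seen mask lets remount tell "option given"
 * apart from "keep the existing value".
 */
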
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif
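
/*
 * For example, on a 64-bit machine with 8 GiB of RAM and 4 KiB pages
 * (totalram_pages() of about 2097152, no highmem), the defaults above
 * work out to roughly 1048576 blocks (4 GiB of data) and 1048576
 * inodes per mount.
 */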

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			     struct folio **foliop, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
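
/*
 * Concretely: a 1 MiB object created without VM_NORESERVE charges all
 * 256 pages (with 4 KiB pages) up front in shmem_acct_size(); with
 * VM_NORESERVE it instead charges one page at a time in
 * shmem_acct_block(), as pages are actually allocated.
 */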

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link, to account for the space
 * needed by each dentry. However, in that case, no new inode number is
 * needed, since that internally draws from another pool of inode numbers
 * (currently global get_next_ino()). This case is indicated by passing
 * NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

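/*
 * With SHMEM_INO_BATCH == 1024 above, each CPU refills its per-cpu
 * cursor from sbinfo->next_ino only once per 1024 allocations, so on
 * SB_KERNMOUNT mounts the stat_lock is taken on roughly 0.1% of
 * shmem_reserve_inode() calls.
 */
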
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

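/*
 * Typical usage: "mount -t tmpfs -o huge=within_size tmpfs /mnt" selects
 * SHMEM_HUGE_WITHIN_SIZE for that mount, while writing "force" or "deny"
 * to /sys/kernel/mm/transparent_hugepage/shmem_enabled applies
 * SHMEM_HUGE_FORCE or SHMEM_HUGE_DENY across all mounts.
 */
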
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

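/*
 * Worked example of the WITHIN_SIZE test above, assuming 4 KiB pages and
 * 2 MiB huge pages (HPAGE_PMD_NR == 512): for a 6 MiB file, i_size covers
 * 1536 pages, so index 100 rounds up to 512 and qualifies, index 1100
 * rounds up to 1536 and still qualifies, but index 1600 rounds up to
 * 2048 and falls through to the ADVISE check.
 */
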
#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * If we failed to lock the folio this time, move the inode
		 * back onto the shrinklist.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_huge_page(&folio->page);
		folio_unlock(folio);
		folio_put(folio);

		/* If the split failed, move the inode back onto the shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
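
/*
 * For example, scanning offsets [start, end) of an object where four of
 * the entries are swap entries (stored as xarray value entries rather
 * than folio pointers) returns 4 << PAGE_SHIFT, i.e. 16 KiB with 4 KiB
 * pages.
 */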
8316a15a370SVlastimil Babka 
8326a15a370SVlastimil Babka /*
83348131e03SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
83448131e03SVlastimil Babka  * given vma is swapped out.
83548131e03SVlastimil Babka  *
8369608703eSJan Kara  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
83748131e03SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
83848131e03SVlastimil Babka  */
83948131e03SVlastimil Babka unsigned long shmem_swap_usage(struct vm_area_struct *vma)
84048131e03SVlastimil Babka {
84148131e03SVlastimil Babka 	struct inode *inode = file_inode(vma->vm_file);
84248131e03SVlastimil Babka 	struct shmem_inode_info *info = SHMEM_I(inode);
84348131e03SVlastimil Babka 	struct address_space *mapping = inode->i_mapping;
84448131e03SVlastimil Babka 	unsigned long swapped;
84548131e03SVlastimil Babka 
84648131e03SVlastimil Babka 	/* Be careful as we don't hold info->lock */
84748131e03SVlastimil Babka 	swapped = READ_ONCE(info->swapped);
84848131e03SVlastimil Babka 
84948131e03SVlastimil Babka 	/*
85048131e03SVlastimil Babka 	 * The easier cases are when the shmem object has nothing in swap, or
85148131e03SVlastimil Babka 	 * the vma maps it whole. Then we can simply use the stats that we
85248131e03SVlastimil Babka 	 * already track.
85348131e03SVlastimil Babka 	 */
85448131e03SVlastimil Babka 	if (!swapped)
85548131e03SVlastimil Babka 		return 0;
85648131e03SVlastimil Babka 
85748131e03SVlastimil Babka 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
85848131e03SVlastimil Babka 		return swapped << PAGE_SHIFT;
85948131e03SVlastimil Babka 
86048131e03SVlastimil Babka 	/* Here comes the more involved part */
86102399c88SPeter Xu 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
86202399c88SPeter Xu 					vma->vm_pgoff + vma_pages(vma));
86348131e03SVlastimil Babka }
86448131e03SVlastimil Babka 
86548131e03SVlastimil Babka /*
86624513264SHugh Dickins  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
86724513264SHugh Dickins  */
86824513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
86924513264SHugh Dickins {
870105c988fSMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
87124513264SHugh Dickins 	pgoff_t index = 0;
87224513264SHugh Dickins 
873105c988fSMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
87424513264SHugh Dickins 	/*
87524513264SHugh Dickins 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
87624513264SHugh Dickins 	 */
877105c988fSMatthew Wilcox (Oracle) 	while (!mapping_unevictable(mapping) &&
878105c988fSMatthew Wilcox (Oracle) 	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
879105c988fSMatthew Wilcox (Oracle) 		check_move_unevictable_folios(&fbatch);
880105c988fSMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
88124513264SHugh Dickins 		cond_resched();
88224513264SHugh Dickins 	}
8837a5d0fbbSHugh Dickins }
8847a5d0fbbSHugh Dickins 
885b9a8a419SMatthew Wilcox (Oracle) static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
88671725ed1SHugh Dickins {
887b9a8a419SMatthew Wilcox (Oracle) 	struct folio *folio;
888b9a8a419SMatthew Wilcox (Oracle) 	struct page *page;
88971725ed1SHugh Dickins 
890b9a8a419SMatthew Wilcox (Oracle) 	/*
891b9a8a419SMatthew Wilcox (Oracle) 	 * At first avoid shmem_getpage(,,,SGP_READ): that fails
892b9a8a419SMatthew Wilcox (Oracle) 	 * beyond i_size, and reports fallocated pages as holes.
893b9a8a419SMatthew Wilcox (Oracle) 	 */
894b9a8a419SMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(inode->i_mapping, index,
895b9a8a419SMatthew Wilcox (Oracle) 					FGP_ENTRY | FGP_LOCK, 0);
896b9a8a419SMatthew Wilcox (Oracle) 	if (!xa_is_value(folio))
897b9a8a419SMatthew Wilcox (Oracle) 		return folio;
898b9a8a419SMatthew Wilcox (Oracle) 	/*
899b9a8a419SMatthew Wilcox (Oracle) 	 * But read a page back from swap if any of it is within i_size
900b9a8a419SMatthew Wilcox (Oracle) 	 * (although in some cases this is just a waste of time).
901b9a8a419SMatthew Wilcox (Oracle) 	 */
902b9a8a419SMatthew Wilcox (Oracle) 	page = NULL;
903b9a8a419SMatthew Wilcox (Oracle) 	shmem_getpage(inode, index, &page, SGP_READ);
904b9a8a419SMatthew Wilcox (Oracle) 	return page ? page_folio(page) : NULL;
90571725ed1SHugh Dickins }
90671725ed1SHugh Dickins 
90771725ed1SHugh Dickins /*
9087f4446eeSMatthew Wilcox  * Remove range of pages and swap entries from page cache, and free them.
9091635f6a7SHugh Dickins  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
9107a5d0fbbSHugh Dickins  */
9111635f6a7SHugh Dickins static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
9121635f6a7SHugh Dickins 								 bool unfalloc)
9131da177e4SLinus Torvalds {
914285b2c4fSHugh Dickins 	struct address_space *mapping = inode->i_mapping;
9151da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
91609cbfeafSKirill A. Shutemov 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
91709cbfeafSKirill A. Shutemov 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
9180e499ed3SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
9197a5d0fbbSHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
920b9a8a419SMatthew Wilcox (Oracle) 	struct folio *folio;
921b9a8a419SMatthew Wilcox (Oracle) 	bool same_folio;
9227a5d0fbbSHugh Dickins 	long nr_swaps_freed = 0;
923285b2c4fSHugh Dickins 	pgoff_t index;
924bda97eabSHugh Dickins 	int i;
9251da177e4SLinus Torvalds 
92683e4fa9cSHugh Dickins 	if (lend == -1)
92783e4fa9cSHugh Dickins 		end = -1;	/* unsigned, so actually very big */
928bda97eabSHugh Dickins 
929d144bf62SHugh Dickins 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
930d144bf62SHugh Dickins 		info->fallocend = start;
931d144bf62SHugh Dickins 
93251dcbdacSMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
933bda97eabSHugh Dickins 	index = start;
9345c211ba2SMatthew Wilcox (Oracle) 	while (index < end && find_lock_entries(mapping, index, end - 1,
93551dcbdacSMatthew Wilcox (Oracle) 			&fbatch, indices)) {
93651dcbdacSMatthew Wilcox (Oracle) 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
937b9a8a419SMatthew Wilcox (Oracle) 			folio = fbatch.folios[i];
938bda97eabSHugh Dickins 
9397a5d0fbbSHugh Dickins 			index = indices[i];
940bda97eabSHugh Dickins 
9417b774aabSMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
9421635f6a7SHugh Dickins 				if (unfalloc)
9431635f6a7SHugh Dickins 					continue;
9447a5d0fbbSHugh Dickins 				nr_swaps_freed += !shmem_free_swap(mapping,
9457b774aabSMatthew Wilcox (Oracle) 								index, folio);
9467a5d0fbbSHugh Dickins 				continue;
9477a5d0fbbSHugh Dickins 			}
9487b774aabSMatthew Wilcox (Oracle) 			index += folio_nr_pages(folio) - 1;
9497a5d0fbbSHugh Dickins 
9507b774aabSMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio))
9511e84a3d9SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
9527b774aabSMatthew Wilcox (Oracle) 			folio_unlock(folio);
953bda97eabSHugh Dickins 		}
95451dcbdacSMatthew Wilcox (Oracle) 		folio_batch_remove_exceptionals(&fbatch);
95551dcbdacSMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
956bda97eabSHugh Dickins 		cond_resched();
957bda97eabSHugh Dickins 		index++;
958bda97eabSHugh Dickins 	}
959bda97eabSHugh Dickins 
960b9a8a419SMatthew Wilcox (Oracle) 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
961b9a8a419SMatthew Wilcox (Oracle) 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
962b9a8a419SMatthew Wilcox (Oracle) 	if (folio) {
963b9a8a419SMatthew Wilcox (Oracle) 		same_folio = lend < folio_pos(folio) + folio_size(folio);
964b9a8a419SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
965b9a8a419SMatthew Wilcox (Oracle) 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
966b9a8a419SMatthew Wilcox (Oracle) 			start = folio->index + folio_nr_pages(folio);
967b9a8a419SMatthew Wilcox (Oracle) 			if (same_folio)
968b9a8a419SMatthew Wilcox (Oracle) 				end = folio->index;
96983e4fa9cSHugh Dickins 		}
970b9a8a419SMatthew Wilcox (Oracle) 		folio_unlock(folio);
971b9a8a419SMatthew Wilcox (Oracle) 		folio_put(folio);
972b9a8a419SMatthew Wilcox (Oracle) 		folio = NULL;
973bda97eabSHugh Dickins 	}
974b9a8a419SMatthew Wilcox (Oracle) 
975b9a8a419SMatthew Wilcox (Oracle) 	if (!same_folio)
976b9a8a419SMatthew Wilcox (Oracle) 		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
977b9a8a419SMatthew Wilcox (Oracle) 	if (folio) {
978b9a8a419SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
979b9a8a419SMatthew Wilcox (Oracle) 		if (!truncate_inode_partial_folio(folio, lstart, lend))
980b9a8a419SMatthew Wilcox (Oracle) 			end = folio->index;
981b9a8a419SMatthew Wilcox (Oracle) 		folio_unlock(folio);
982b9a8a419SMatthew Wilcox (Oracle) 		folio_put(folio);
983bda97eabSHugh Dickins 	}
984bda97eabSHugh Dickins 
985bda97eabSHugh Dickins 	index = start;
986b1a36650SHugh Dickins 	while (index < end) {
987bda97eabSHugh Dickins 		cond_resched();
9880cd6144aSJohannes Weiner 
9890e499ed3SMatthew Wilcox (Oracle) 		if (!find_get_entries(mapping, index, end - 1, &fbatch,
990cf2039afSMatthew Wilcox (Oracle) 				indices)) {
991b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
992b1a36650SHugh Dickins 			if (index == start || end != -1)
993bda97eabSHugh Dickins 				break;
994b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
995bda97eabSHugh Dickins 			index = start;
996bda97eabSHugh Dickins 			continue;
997bda97eabSHugh Dickins 		}
9980e499ed3SMatthew Wilcox (Oracle) 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
999b9a8a419SMatthew Wilcox (Oracle) 			folio = fbatch.folios[i];
1000bda97eabSHugh Dickins 
10017a5d0fbbSHugh Dickins 			index = indices[i];
10020e499ed3SMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
10031635f6a7SHugh Dickins 				if (unfalloc)
10041635f6a7SHugh Dickins 					continue;
10050e499ed3SMatthew Wilcox (Oracle) 				if (shmem_free_swap(mapping, index, folio)) {
1006b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
1007b1a36650SHugh Dickins 					index--;
1008b1a36650SHugh Dickins 					break;
1009b1a36650SHugh Dickins 				}
1010b1a36650SHugh Dickins 				nr_swaps_freed++;
10117a5d0fbbSHugh Dickins 				continue;
10127a5d0fbbSHugh Dickins 			}
10137a5d0fbbSHugh Dickins 
10140e499ed3SMatthew Wilcox (Oracle) 			folio_lock(folio);
1015800d8c63SKirill A. Shutemov 
10160e499ed3SMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio)) {
10170e499ed3SMatthew Wilcox (Oracle) 				if (folio_mapping(folio) != mapping) {
1018b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
10190e499ed3SMatthew Wilcox (Oracle) 					folio_unlock(folio);
1020b1a36650SHugh Dickins 					index--;
1021b1a36650SHugh Dickins 					break;
10227a5d0fbbSHugh Dickins 				}
10230e499ed3SMatthew Wilcox (Oracle) 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
10240e499ed3SMatthew Wilcox (Oracle) 						folio);
10250e499ed3SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
102671725ed1SHugh Dickins 			}
1027b9a8a419SMatthew Wilcox (Oracle) 			index = folio->index + folio_nr_pages(folio) - 1;
10280e499ed3SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1029bda97eabSHugh Dickins 		}
10300e499ed3SMatthew Wilcox (Oracle) 		folio_batch_remove_exceptionals(&fbatch);
10310e499ed3SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
1032bda97eabSHugh Dickins 		index++;
1033bda97eabSHugh Dickins 	}
103494c1e62dSHugh Dickins 
10354595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10367a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10371da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10384595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10391635f6a7SHugh Dickins }
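/*
 * A note on the retry logic above: find_get_entries() can race with
 * swapin or page replacement, so when an entry changes under the loop
 * (shmem_free_swap() reports the swap entry was replaced, or
 * folio_mapping() no longer matches) it does "index--" and breaks out
 * of the batch to look that index up afresh.  For a full truncation
 * (end == -1) the scan restarts from "start" until nothing remains.
 */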
10401da177e4SLinus Torvalds 
10411635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10421635f6a7SHugh Dickins {
10431635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1044078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
10451da177e4SLinus Torvalds }
104694c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
10471da177e4SLinus Torvalds 
1048549c7297SChristian Brauner static int shmem_getattr(struct user_namespace *mnt_userns,
1049549c7297SChristian Brauner 			 const struct path *path, struct kstat *stat,
1050a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
105144a30220SYu Zhao {
1052a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
105344a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
105444a30220SYu Zhao 
1055d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10564595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
105744a30220SYu Zhao 		shmem_recalc_inode(inode);
10584595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1059d0424c42SHugh Dickins 	}
10600d56a451SChristian Brauner 	generic_fillattr(&init_user_ns, inode, stat);
106189fdcd26SYang Shi 
1062a7fddc36SHugh Dickins 	if (shmem_is_huge(NULL, inode, 0))
106389fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
106489fdcd26SYang Shi 
1065f7cd16a5SXavier Roche 	if (request_mask & STATX_BTIME) {
1066f7cd16a5SXavier Roche 		stat->result_mask |= STATX_BTIME;
1067f7cd16a5SXavier Roche 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1068f7cd16a5SXavier Roche 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1069f7cd16a5SXavier Roche 	}
1070f7cd16a5SXavier Roche 
107144a30220SYu Zhao 	return 0;
107244a30220SYu Zhao }
107344a30220SYu Zhao 
1074549c7297SChristian Brauner static int shmem_setattr(struct user_namespace *mnt_userns,
1075549c7297SChristian Brauner 			 struct dentry *dentry, struct iattr *attr)
10761da177e4SLinus Torvalds {
107775c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
107840e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
10791da177e4SLinus Torvalds 	int error;
10801da177e4SLinus Torvalds 
10812f221d6fSChristian Brauner 	error = setattr_prepare(&init_user_ns, dentry, attr);
1082db78b877SChristoph Hellwig 	if (error)
1083db78b877SChristoph Hellwig 		return error;
1084db78b877SChristoph Hellwig 
108594c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
108694c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
108794c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10883889e6e7Snpiggin@suse.de 
10899608703eSJan Kara 		/* protected by i_rwsem */
109040e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
109140e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
109240e041a2SDavid Herrmann 			return -EPERM;
109340e041a2SDavid Herrmann 
109494c1e62dSHugh Dickins 		if (newsize != oldsize) {
109577142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
109677142517SKonstantin Khlebnikov 					oldsize, newsize);
109777142517SKonstantin Khlebnikov 			if (error)
109877142517SKonstantin Khlebnikov 				return error;
109994c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1100078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
110194c1e62dSHugh Dickins 		}
1102afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
110394c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1104d0424c42SHugh Dickins 			if (oldsize > holebegin)
1105d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1106d0424c42SHugh Dickins 							holebegin, 0, 1);
1107d0424c42SHugh Dickins 			if (info->alloced)
1108d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1109d0424c42SHugh Dickins 							newsize, (loff_t)-1);
111094c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1111d0424c42SHugh Dickins 			if (oldsize > holebegin)
1112d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1113d0424c42SHugh Dickins 							holebegin, 0, 1);
111494c1e62dSHugh Dickins 		}
11151da177e4SLinus Torvalds 	}
11161da177e4SLinus Torvalds 
11172f221d6fSChristian Brauner 	setattr_copy(&init_user_ns, inode, attr);
1118db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1119e65ce2a5SChristian Brauner 		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
11201da177e4SLinus Torvalds 	return error;
11211da177e4SLinus Torvalds }
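/*
 * On the shrinking path above, unmap_mapping_range() runs both before
 * and after shmem_truncate_range(): the first call cheaply drops the
 * bulk of the ptes before the pages are freed, and the second catches
 * private pages racily COWed in the window in between (see the
 * "racily COWed" comment above).
 */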
11221da177e4SLinus Torvalds 
11231f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11241da177e4SLinus Torvalds {
11251da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1126779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11271da177e4SLinus Torvalds 
112830e6a51dSHui Su 	if (shmem_mapping(inode->i_mapping)) {
11291da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11301da177e4SLinus Torvalds 		inode->i_size = 0;
1131bc786390SHugh Dickins 		mapping_set_exiting(inode->i_mapping);
11323889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1133779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1134779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1135779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1136779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1137779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1138779750d2SKirill A. Shutemov 			}
1139779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1140779750d2SKirill A. Shutemov 		}
1141af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1142af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1143af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1144af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1145cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1146af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1147af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11481da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1149cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11501da177e4SLinus Torvalds 		}
11513ed47db3SAl Viro 	}
1152b09e0fa4SEric Paris 
115338f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11540f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11555b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1156dbd5768fSJan Kara 	clear_inode(inode);
11571da177e4SLinus Torvalds }
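/*
 * The stop_eviction handshake above pairs with shmem_unuse() below:
 * swapoff raises info->stop_eviction before dropping
 * shmem_swaplist_mutex, so eviction parks in wait_var_event() until the
 * scan of this inode has finished, and only then removes the inode from
 * the swaplist under the mutex.
 */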
11581da177e4SLinus Torvalds 
1159b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1160da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t start, struct folio_batch *fbatch,
1161da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t *indices, unsigned int type)
1162478922e2SMatthew Wilcox {
1163b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1164da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio;
116587039546SHugh Dickins 	swp_entry_t entry;
1166478922e2SMatthew Wilcox 
1167478922e2SMatthew Wilcox 	rcu_read_lock();
1168da08e9b7SMatthew Wilcox (Oracle) 	xas_for_each(&xas, folio, ULONG_MAX) {
1169da08e9b7SMatthew Wilcox (Oracle) 		if (xas_retry(&xas, folio))
11705b9c98f3SMike Kravetz 			continue;
1171b56a2d8aSVineeth Remanan Pillai 
1172da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1173478922e2SMatthew Wilcox 			continue;
1174b56a2d8aSVineeth Remanan Pillai 
1175da08e9b7SMatthew Wilcox (Oracle) 		entry = radix_to_swp_entry(folio);
11766cec2b95SMiaohe Lin 		/*
11776cec2b95SMiaohe Lin 		 * swapin error entries can be found in the mapping. But they're
11786cec2b95SMiaohe Lin 		 * deliberately ignored here as we've done everything we can do.
11796cec2b95SMiaohe Lin 		 */
118087039546SHugh Dickins 		if (swp_type(entry) != type)
1181b56a2d8aSVineeth Remanan Pillai 			continue;
1182b56a2d8aSVineeth Remanan Pillai 
1183e384200eSHugh Dickins 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1184da08e9b7SMatthew Wilcox (Oracle) 		if (!folio_batch_add(fbatch, folio))
1185da08e9b7SMatthew Wilcox (Oracle) 			break;
1186b56a2d8aSVineeth Remanan Pillai 
1187b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1188e21a2955SMatthew Wilcox 			xas_pause(&xas);
1189478922e2SMatthew Wilcox 			cond_resched_rcu();
1190478922e2SMatthew Wilcox 		}
1191b56a2d8aSVineeth Remanan Pillai 	}
1192478922e2SMatthew Wilcox 	rcu_read_unlock();
1193e21a2955SMatthew Wilcox 
1194da08e9b7SMatthew Wilcox (Oracle) 	return xas.xa_index;
1195b56a2d8aSVineeth Remanan Pillai }
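/*
 * This walk gathers at most a batch (PAGEVEC_SIZE) of swap entries of
 * the given type per call; xas_pause() plus cond_resched_rcu() let it
 * drop out of the RCU read section between iterations, and the caller
 * resumes scanning from the last index recorded in indices[].
 */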
1196b56a2d8aSVineeth Remanan Pillai 
1197b56a2d8aSVineeth Remanan Pillai /*
1198b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1199b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1200b56a2d8aSVineeth Remanan Pillai  */
1201da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode,
1202da08e9b7SMatthew Wilcox (Oracle) 		struct folio_batch *fbatch, pgoff_t *indices)
1203b56a2d8aSVineeth Remanan Pillai {
1204b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1205b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1206b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1207b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1208b56a2d8aSVineeth Remanan Pillai 
1209da08e9b7SMatthew Wilcox (Oracle) 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1210da08e9b7SMatthew Wilcox (Oracle) 		struct folio *folio = fbatch->folios[i];
1211b56a2d8aSVineeth Remanan Pillai 
1212da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1213b56a2d8aSVineeth Remanan Pillai 			continue;
1214da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, indices[i],
1215da08e9b7SMatthew Wilcox (Oracle) 					  &folio, SGP_CACHE,
1216b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1217b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1218b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1219da08e9b7SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1220da08e9b7SMatthew Wilcox (Oracle) 			folio_put(folio);
1221b56a2d8aSVineeth Remanan Pillai 			ret++;
1222b56a2d8aSVineeth Remanan Pillai 		}
1223b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1224b56a2d8aSVineeth Remanan Pillai 			break;
1225b56a2d8aSVineeth Remanan Pillai 		error = 0;
1226b56a2d8aSVineeth Remanan Pillai 	}
1227b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1228478922e2SMatthew Wilcox }
1229478922e2SMatthew Wilcox 
123046f65ec1SHugh Dickins /*
123146f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
123246f65ec1SHugh Dickins  */
123310a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type)
12341da177e4SLinus Torvalds {
1235b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1236b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1237da08e9b7SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
1238b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1239b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12401da177e4SLinus Torvalds 
1241b56a2d8aSVineeth Remanan Pillai 	do {
1242da08e9b7SMatthew Wilcox (Oracle) 		folio_batch_init(&fbatch);
1243da08e9b7SMatthew Wilcox (Oracle) 		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1244da08e9b7SMatthew Wilcox (Oracle) 		if (folio_batch_count(&fbatch) == 0) {
1245b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1246778dd893SHugh Dickins 			break;
1247b56a2d8aSVineeth Remanan Pillai 		}
1248b56a2d8aSVineeth Remanan Pillai 
1249da08e9b7SMatthew Wilcox (Oracle) 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1250b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1251b56a2d8aSVineeth Remanan Pillai 			break;
1252b56a2d8aSVineeth Remanan Pillai 
1253da08e9b7SMatthew Wilcox (Oracle) 		start = indices[folio_batch_count(&fbatch) - 1];
1254b56a2d8aSVineeth Remanan Pillai 	} while (true);
1255b56a2d8aSVineeth Remanan Pillai 
1256b56a2d8aSVineeth Remanan Pillai 	return ret;
1257b56a2d8aSVineeth Remanan Pillai }
1258b56a2d8aSVineeth Remanan Pillai 
1259b56a2d8aSVineeth Remanan Pillai /*
1260b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1261b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1262b56a2d8aSVineeth Remanan Pillai  * unused.
1263b56a2d8aSVineeth Remanan Pillai  */
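/*
 * (Reached from the swapoff path: try_to_unuse() in mm/swapfile.c calls
 * shmem_unuse() to drain tmpfs pages from the departing swap device.)
 */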
126410a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
1265b56a2d8aSVineeth Remanan Pillai {
1266b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1267b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1268b56a2d8aSVineeth Remanan Pillai 
1269b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1270b56a2d8aSVineeth Remanan Pillai 		return 0;
1271b56a2d8aSVineeth Remanan Pillai 
1272b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1273b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1274b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1275b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1276b56a2d8aSVineeth Remanan Pillai 			continue;
1277b56a2d8aSVineeth Remanan Pillai 		}
1278af53d3e9SHugh Dickins 		/*
1279af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1280af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1281af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1282af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1283af53d3e9SHugh Dickins 		 */
1284af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1285b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1286b56a2d8aSVineeth Remanan Pillai 
128710a9c496SChristoph Hellwig 		error = shmem_unuse_inode(&info->vfs_inode, type);
1288b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1289b56a2d8aSVineeth Remanan Pillai 
1290b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1291b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1292b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1293b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1294af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1295af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1296b56a2d8aSVineeth Remanan Pillai 		if (error)
1297b56a2d8aSVineeth Remanan Pillai 			break;
12981da177e4SLinus Torvalds 	}
1299cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1300778dd893SHugh Dickins 
1301778dd893SHugh Dickins 	return error;
13021da177e4SLinus Torvalds }
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds /*
13051da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13061da177e4SLinus Torvalds  */
13071da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13081da177e4SLinus Torvalds {
1309e2e3fdc7SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
13101da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13111da177e4SLinus Torvalds 	struct address_space *mapping;
13121da177e4SLinus Torvalds 	struct inode *inode;
13136922c0c7SHugh Dickins 	swp_entry_t swap;
13146922c0c7SHugh Dickins 	pgoff_t index;
13151da177e4SLinus Torvalds 
13161e6decf3SHugh Dickins 	/*
13171e6decf3SHugh Dickins 	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
13181e6decf3SHugh Dickins 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
13191e6decf3SHugh Dickins 	 * and its shmem_writeback() needs them to be split when swapping.
13201e6decf3SHugh Dickins 	 */
13211e6decf3SHugh Dickins 	if (PageTransCompound(page)) {
13221e6decf3SHugh Dickins 		/* Ensure the subpages are still dirty */
13231e6decf3SHugh Dickins 		SetPageDirty(page);
13241e6decf3SHugh Dickins 		if (split_huge_page(page) < 0)
13251e6decf3SHugh Dickins 			goto redirty;
13261e6decf3SHugh Dickins 		ClearPageDirty(page);
13271e6decf3SHugh Dickins 	}
13281e6decf3SHugh Dickins 
13291da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13301da177e4SLinus Torvalds 	mapping = page->mapping;
13311da177e4SLinus Torvalds 	index = page->index;
13321da177e4SLinus Torvalds 	inode = mapping->host;
13331da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13341da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13351da177e4SLinus Torvalds 		goto redirty;
1336d9fe526aSHugh Dickins 	if (!total_swap_pages)
13371da177e4SLinus Torvalds 		goto redirty;
13381da177e4SLinus Torvalds 
1339d9fe526aSHugh Dickins 	/*
134097b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
134197b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
134297b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
134397b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
134497b713baSChristoph Hellwig 	 * threads or sync.
1345d9fe526aSHugh Dickins 	 */
134648f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
134748f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
134848f170fbSHugh Dickins 		goto redirty;
134948f170fbSHugh Dickins 	}
13501635f6a7SHugh Dickins 
13511635f6a7SHugh Dickins 	/*
13521635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13531635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13541635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13551aac1400SHugh Dickins 	 *
13561aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13571aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13581aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13591aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13601aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many
13601aac1400SHugh Dickins 	 * pages go unswapped.
13611635f6a7SHugh Dickins 	 */
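	/*
	 * ("Too many" above: nr_unswapped, counted below, is compared by
	 * shmem_fallocate() (outside this excerpt) against the number of
	 * pages it has fallocated, and the fallocation is abandoned with
	 * -ENOMEM once reclaim keeps pushing unwritten pages this way.)
	 */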
13621635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13631aac1400SHugh Dickins 		if (inode->i_private) {
13641aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13651aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13661aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13671aac1400SHugh Dickins 			if (shmem_falloc &&
13688e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13691aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13701aac1400SHugh Dickins 			    index < shmem_falloc->next)
13711aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13721aac1400SHugh Dickins 			else
13731aac1400SHugh Dickins 				shmem_falloc = NULL;
13741aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
13751aac1400SHugh Dickins 			if (shmem_falloc)
13761aac1400SHugh Dickins 				goto redirty;
13771aac1400SHugh Dickins 		}
13781635f6a7SHugh Dickins 		clear_highpage(page);
13791635f6a7SHugh Dickins 		flush_dcache_page(page);
13801635f6a7SHugh Dickins 		SetPageUptodate(page);
13811635f6a7SHugh Dickins 	}
13821635f6a7SHugh Dickins 
1383e2e3fdc7SMatthew Wilcox (Oracle) 	swap = folio_alloc_swap(folio);
138448f170fbSHugh Dickins 	if (!swap.val)
138548f170fbSHugh Dickins 		goto redirty;
1386d9fe526aSHugh Dickins 
1387b1dea800SHugh Dickins 	/*
1388b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
13896922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
13906922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1391b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
13926922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
13936922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1394b1dea800SHugh Dickins 	 */
1395b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
139605bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1397b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1398b1dea800SHugh Dickins 
13994afab1cdSYang Shi 	if (add_to_swap_cache(page, swap,
14003852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
14013852f676SJoonsoo Kim 			NULL) == 0) {
14024595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1403267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1404267a4c76SHugh Dickins 		info->swapped++;
14054595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1406267a4c76SHugh Dickins 
1407aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14086922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
14096922c0c7SHugh Dickins 
14106922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1411d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
14129fab5619SHugh Dickins 		swap_writepage(page, wbc);
14131da177e4SLinus Torvalds 		return 0;
14141da177e4SLinus Torvalds 	}
14151da177e4SLinus Torvalds 
14166922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
141775f6d6d2SMinchan Kim 	put_swap_page(page, swap);
14181da177e4SLinus Torvalds redirty:
14191da177e4SLinus Torvalds 	set_page_dirty(page);
1420d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1421d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1422d9fe526aSHugh Dickins 	unlock_page(page);
1423d9fe526aSHugh Dickins 	return 0;
14241da177e4SLinus Torvalds }
14251da177e4SLinus Torvalds 
142675edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
142771fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1428680d794bSakpm@linux-foundation.org {
1429680d794bSakpm@linux-foundation.org 	char buffer[64];
1430680d794bSakpm@linux-foundation.org 
143171fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1432095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1433095f1fc4SLee Schermerhorn 
1434a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1435095f1fc4SLee Schermerhorn 
1436095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1437680d794bSakpm@linux-foundation.org }
143871fe804bSLee Schermerhorn 
143971fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
144071fe804bSLee Schermerhorn {
144171fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
144271fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
1443bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
144471fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
144571fe804bSLee Schermerhorn 		mpol_get(mpol);
1446bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
144771fe804bSLee Schermerhorn 	}
144871fe804bSLee Schermerhorn 	return mpol;
144971fe804bSLee Schermerhorn }
145075edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
145175edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
145275edd345SHugh Dickins {
145375edd345SHugh Dickins }
145475edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
145575edd345SHugh Dickins {
145675edd345SHugh Dickins 	return NULL;
145775edd345SHugh Dickins }
145875edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
145975edd345SHugh Dickins #ifndef CONFIG_NUMA
146075edd345SHugh Dickins #define vm_policy vm_private_data
146175edd345SHugh Dickins #endif
1462680d794bSakpm@linux-foundation.org 
1463800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1464800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1465800d8c63SKirill A. Shutemov {
1466800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14672c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1468800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1469800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1470800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1471800d8c63SKirill A. Shutemov }
1472800d8c63SKirill A. Shutemov 
1473800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1474800d8c63SKirill A. Shutemov {
1475800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1476800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1477800d8c63SKirill A. Shutemov }
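/*
 * The pseudo-vma trick: swap readahead and the folio allocator only look
 * at a vma to find its mempolicy, so a throwaway on-stack vma is set up
 * carrying the inode's shared policy (with vm_pgoff biased by i_ino to
 * spread interleaving across nodes) and torn down right after use.
 */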
1478800d8c63SKirill A. Shutemov 
147941ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
148041ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
14811da177e4SLinus Torvalds {
14821da177e4SLinus Torvalds 	struct vm_area_struct pvma;
148318a2f371SMel Gorman 	struct page *page;
14848c63ca5bSWill Deacon 	struct vm_fault vmf = {
14858c63ca5bSWill Deacon 		.vma = &pvma,
14868c63ca5bSWill Deacon 	};
14871da177e4SLinus Torvalds 
1488800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1489e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1490800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
149118a2f371SMel Gorman 
1492800d8c63SKirill A. Shutemov 	return page;
1493800d8c63SKirill A. Shutemov }
149418a2f371SMel Gorman 
149578cc8cdcSRik van Riel /*
149678cc8cdcSRik van Riel  * Make sure huge_gfp is always more limited than limit_gfp.
149778cc8cdcSRik van Riel  * Some of the flags set permissions, while others set limitations.
149878cc8cdcSRik van Riel  */
149978cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
150078cc8cdcSRik van Riel {
150178cc8cdcSRik van Riel 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
150278cc8cdcSRik van Riel 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1503187df5ddSRik van Riel 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1504187df5ddSRik van Riel 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1505187df5ddSRik van Riel 
1506187df5ddSRik van Riel 	/* Allow allocations only from the originally specified zones. */
1507187df5ddSRik van Riel 	result |= zoneflags;
150878cc8cdcSRik van Riel 
150978cc8cdcSRik van Riel 	/*
151078cc8cdcSRik van Riel 	 * Minimize the result gfp by taking the union with the deny flags,
151178cc8cdcSRik van Riel 	 * and the intersection of the allow flags.
151278cc8cdcSRik van Riel 	 */
151378cc8cdcSRik van Riel 	result |= (limit_gfp & denyflags);
151478cc8cdcSRik van Riel 	result |= (huge_gfp & limit_gfp) & allowflags;
151578cc8cdcSRik van Riel 
151678cc8cdcSRik van Riel 	return result;
151778cc8cdcSRik van Riel }
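/*
 * Illustration (flag values assumed from common gfp definitions, not
 * from this file): with huge_gfp = GFP_TRANSHUGE and limit_gfp =
 * GFP_NOFS, the allowflags intersection clears __GFP_FS from the
 * result, since GFP_NOFS lacks it, while deny flags like __GFP_NORETRY
 * survive from either side; the huge allocation thus never gains a
 * permission the original mask withheld.
 */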
151878cc8cdcSRik van Riel 
151972827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1520800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1521800d8c63SKirill A. Shutemov {
1522800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15237b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15247b8d046fSMatthew Wilcox 	pgoff_t hindex;
1525dfe98499SMatthew Wilcox (Oracle) 	struct folio *folio;
1526800d8c63SKirill A. Shutemov 
15274620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15287b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15297b8d046fSMatthew Wilcox 								XA_PRESENT))
1530800d8c63SKirill A. Shutemov 		return NULL;
1531800d8c63SKirill A. Shutemov 
1532800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1533dfe98499SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1534800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1535dfe98499SMatthew Wilcox (Oracle) 	if (!folio)
1536dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
153772827e5cSMatthew Wilcox (Oracle) 	return folio;
153818a2f371SMel Gorman }
153918a2f371SMel Gorman 
15400c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp,
154118a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
154218a2f371SMel Gorman {
154318a2f371SMel Gorman 	struct vm_area_struct pvma;
15440c023ef5SMatthew Wilcox (Oracle) 	struct folio *folio;
154518a2f371SMel Gorman 
1546800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
15470c023ef5SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1548800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
154918a2f371SMel Gorman 
15500c023ef5SMatthew Wilcox (Oracle) 	return folio;
155118a2f371SMel Gorman }
155218a2f371SMel Gorman 
155318a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
155418a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
155518a2f371SMel Gorman {
15560c023ef5SMatthew Wilcox (Oracle) 	return &shmem_alloc_folio(gfp, info, index)->page;
1557800d8c63SKirill A. Shutemov }
1558800d8c63SKirill A. Shutemov 
1559b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1560800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1561800d8c63SKirill A. Shutemov {
15620f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
156372827e5cSMatthew Wilcox (Oracle) 	struct folio *folio;
1564800d8c63SKirill A. Shutemov 	int nr;
1565800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1566800d8c63SKirill A. Shutemov 
1567396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1568800d8c63SKirill A. Shutemov 		huge = false;
1569800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1570800d8c63SKirill A. Shutemov 
15710f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1572800d8c63SKirill A. Shutemov 		goto failed;
1573800d8c63SKirill A. Shutemov 
1574800d8c63SKirill A. Shutemov 	if (huge)
157572827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_hugefolio(gfp, info, index);
1576800d8c63SKirill A. Shutemov 	else
157772827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_folio(gfp, info, index);
157872827e5cSMatthew Wilcox (Oracle) 	if (folio) {
157972827e5cSMatthew Wilcox (Oracle) 		__folio_set_locked(folio);
158072827e5cSMatthew Wilcox (Oracle) 		__folio_set_swapbacked(folio);
1581b1d0ec3aSMatthew Wilcox (Oracle) 		return folio;
158275edd345SHugh Dickins 	}
158318a2f371SMel Gorman 
1584800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15850f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1586800d8c63SKirill A. Shutemov failed:
1587800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15881da177e4SLinus Torvalds }
158971fe804bSLee Schermerhorn 
15901da177e4SLinus Torvalds /*
1591bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1592bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1593bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1594bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1595bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1596bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1597bde05d1cSHugh Dickins  *
1598bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1599bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1600bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1601bde05d1cSHugh Dickins  */
1602069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1603bde05d1cSHugh Dickins {
1604069d849cSMatthew Wilcox (Oracle) 	return folio_zonenum(folio) > gfp_zone(gfp);
1605bde05d1cSHugh Dickins }
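/*
 * Illustrative case for the check above: a folio swapped in to
 * ZONE_NORMAL, faced with a mapping gfp of GFP_KERNEL | __GFP_DMA32
 * (as gma500 needs), gives folio_zonenum(folio) > gfp_zone(gfp) ==
 * ZONE_DMA32, so shmem_replace_page() below must copy it lower first.
 */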
1606bde05d1cSHugh Dickins 
1607bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1608bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1609bde05d1cSHugh Dickins {
1610bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1611d21bba2bSMatthew Wilcox (Oracle) 	struct folio *old, *new;
1612bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1613c1cb20d4SYu Zhao 	swp_entry_t entry;
1614bde05d1cSHugh Dickins 	pgoff_t swap_index;
1615bde05d1cSHugh Dickins 	int error;
1616bde05d1cSHugh Dickins 
1617bde05d1cSHugh Dickins 	oldpage = *pagep;
1618c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1619c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1620bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1621bde05d1cSHugh Dickins 
1622bde05d1cSHugh Dickins 	/*
1623bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1624bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1625bde05d1cSHugh Dickins 	 */
1626bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1627bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1628bde05d1cSHugh Dickins 	if (!newpage)
1629bde05d1cSHugh Dickins 		return -ENOMEM;
1630bde05d1cSHugh Dickins 
163109cbfeafSKirill A. Shutemov 	get_page(newpage);
1632bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
16330142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1634bde05d1cSHugh Dickins 
16359956edf3SHugh Dickins 	__SetPageLocked(newpage);
16369956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1637bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1638c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1639bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1640bde05d1cSHugh Dickins 
1641bde05d1cSHugh Dickins 	/*
1642bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1643bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1644bde05d1cSHugh Dickins 	 */
1645b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
164662f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
16470142ef6cSHugh Dickins 	if (!error) {
1648d21bba2bSMatthew Wilcox (Oracle) 		old = page_folio(oldpage);
1649d21bba2bSMatthew Wilcox (Oracle) 		new = page_folio(newpage);
1650d21bba2bSMatthew Wilcox (Oracle) 		mem_cgroup_migrate(old, new);
16510d1c2072SJohannes Weiner 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
16520d1c2072SJohannes Weiner 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
16530142ef6cSHugh Dickins 	}
1654b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1655bde05d1cSHugh Dickins 
16560142ef6cSHugh Dickins 	if (unlikely(error)) {
16570142ef6cSHugh Dickins 		/*
16580142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16590142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16600142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16610142ef6cSHugh Dickins 		 */
16620142ef6cSHugh Dickins 		oldpage = newpage;
16630142ef6cSHugh Dickins 	} else {
16646058eaecSJohannes Weiner 		lru_cache_add(newpage);
16650142ef6cSHugh Dickins 		*pagep = newpage;
16660142ef6cSHugh Dickins 	}
1667bde05d1cSHugh Dickins 
1668bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1669bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1670bde05d1cSHugh Dickins 
1671bde05d1cSHugh Dickins 	unlock_page(oldpage);
167209cbfeafSKirill A. Shutemov 	put_page(oldpage);
167309cbfeafSKirill A. Shutemov 	put_page(oldpage);
16740142ef6cSHugh Dickins 	return error;
1675bde05d1cSHugh Dickins }
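/*
 * Refcount sketch for the two put_page(oldpage) calls above: on success
 * they drop the swap cache's reference (that cache slot now holds
 * newpage) and the caller's reference, *pagep having been switched to
 * newpage; on the defensive error path "oldpage" was redirected to
 * newpage, so the same two puts free the unused replacement instead.
 */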
1676bde05d1cSHugh Dickins 
16776cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
16786cec2b95SMiaohe Lin 					 struct folio *folio, swp_entry_t swap)
16796cec2b95SMiaohe Lin {
16806cec2b95SMiaohe Lin 	struct address_space *mapping = inode->i_mapping;
16816cec2b95SMiaohe Lin 	struct shmem_inode_info *info = SHMEM_I(inode);
16826cec2b95SMiaohe Lin 	swp_entry_t swapin_error;
16836cec2b95SMiaohe Lin 	void *old;
16846cec2b95SMiaohe Lin 
16856cec2b95SMiaohe Lin 	swapin_error = make_swapin_error_entry(&folio->page);
16866cec2b95SMiaohe Lin 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
16876cec2b95SMiaohe Lin 			     swp_to_radix_entry(swap),
16886cec2b95SMiaohe Lin 			     swp_to_radix_entry(swapin_error), 0);
16896cec2b95SMiaohe Lin 	if (old != swp_to_radix_entry(swap))
16906cec2b95SMiaohe Lin 		return;
16916cec2b95SMiaohe Lin 
16926cec2b95SMiaohe Lin 	folio_wait_writeback(folio);
16936cec2b95SMiaohe Lin 	delete_from_swap_cache(&folio->page);
16946cec2b95SMiaohe Lin 	spin_lock_irq(&info->lock);
16956cec2b95SMiaohe Lin 	/*
16966cec2b95SMiaohe Lin 	 * Don't treat a swapin error folio as alloced. Otherwise inode->i_blocks
16976cec2b95SMiaohe Lin 	 * won't be 0 when the inode is released, which would trigger the
16986cec2b95SMiaohe Lin 	 * WARN_ON(inode->i_blocks) in shmem_evict_inode().
16996cec2b95SMiaohe Lin 	 */
17006cec2b95SMiaohe Lin 	info->alloced--;
17016cec2b95SMiaohe Lin 	info->swapped--;
17026cec2b95SMiaohe Lin 	shmem_recalc_inode(inode);
17036cec2b95SMiaohe Lin 	spin_unlock_irq(&info->lock);
17046cec2b95SMiaohe Lin 	swap_free(swap);
17056cec2b95SMiaohe Lin }
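/*
 * From here on, the offset holds a swapin-error entry instead of a real
 * swap entry: a later fault on the same index trips the
 * is_swapin_error_entry() check in shmem_swapin_folio() below and fails
 * with -EIO rather than retrying the broken swapin forever.
 */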
17066cec2b95SMiaohe Lin 
1707bde05d1cSHugh Dickins /*
1708c5bf121eSVineeth Remanan Pillai  * Swap in the folio pointed to by *foliop.
1709c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *foliop contains a valid swapped folio.
1710c5bf121eSVineeth Remanan Pillai  * Returns 0 and the folio in *foliop on success. On failure, returns the
1711af44c12fSRandy Dunlap  * error code and NULL in *foliop.
17121da177e4SLinus Torvalds  */
1713da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1714da08e9b7SMatthew Wilcox (Oracle) 			     struct folio **foliop, enum sgp_type sgp,
1715c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
17162b740303SSouptick Joarder 			     vm_fault_t *fault_type)
17171da177e4SLinus Torvalds {
17181da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
171923f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
172004f94e3fSDan Schatzberg 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1721b1e1ef34SYang Shi 	struct page *page;
1722da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
17231da177e4SLinus Torvalds 	swp_entry_t swap;
17241da177e4SLinus Torvalds 	int error;
17251da177e4SLinus Torvalds 
1726da08e9b7SMatthew Wilcox (Oracle) 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1727da08e9b7SMatthew Wilcox (Oracle) 	swap = radix_to_swp_entry(*foliop);
1728da08e9b7SMatthew Wilcox (Oracle) 	*foliop = NULL;
172954af6042SHugh Dickins 
17306cec2b95SMiaohe Lin 	if (is_swapin_error_entry(swap))
17316cec2b95SMiaohe Lin 		return -EIO;
17326cec2b95SMiaohe Lin 
17331da177e4SLinus Torvalds 	/* Look it up and read it in.. */
1734ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
173527ab7006SHugh Dickins 	if (!page) {
17369e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17379e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
173868da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17399e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17402262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17419e18eb29SAndres Lagar-Cavilla 		}
17429e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
174341ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
174427ab7006SHugh Dickins 		if (!page) {
17451da177e4SLinus Torvalds 			error = -ENOMEM;
174654af6042SHugh Dickins 			goto failed;
1747285b2c4fSHugh Dickins 		}
17481da177e4SLinus Torvalds 	}
1749da08e9b7SMatthew Wilcox (Oracle) 	folio = page_folio(page);
17501da177e4SLinus Torvalds 
17511da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
1752da08e9b7SMatthew Wilcox (Oracle) 	folio_lock(folio);
1753da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_swapcache(folio) ||
1754da08e9b7SMatthew Wilcox (Oracle) 	    folio_swap_entry(folio).val != swap.val ||
1755d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1756c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1757d1899228SHugh Dickins 		goto unlock;
1758bde05d1cSHugh Dickins 	}
1759da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
17601da177e4SLinus Torvalds 		error = -EIO;
176154af6042SHugh Dickins 		goto failed;
176254af6042SHugh Dickins 	}
1763da08e9b7SMatthew Wilcox (Oracle) 	folio_wait_writeback(folio);
176454af6042SHugh Dickins 
17658a84802eSSteven Price 	/*
17668a84802eSSteven Price 	 * Some architectures may have to restore extra metadata to the
1767da08e9b7SMatthew Wilcox (Oracle) 	 * folio after reading from swap.
17688a84802eSSteven Price 	 */
1769da08e9b7SMatthew Wilcox (Oracle) 	arch_swap_restore(swap, folio);
17708a84802eSSteven Price 
1771069d849cSMatthew Wilcox (Oracle) 	if (shmem_should_replace_folio(folio, gfp)) {
1772bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1773bde05d1cSHugh Dickins 		if (error)
177454af6042SHugh Dickins 			goto failed;
17751da177e4SLinus Torvalds 	}
17761da177e4SLinus Torvalds 
1777b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, index,
17783fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17793fea5a49SJohannes Weiner 					charge_mm);
178054af6042SHugh Dickins 	if (error)
178154af6042SHugh Dickins 		goto failed;
178254af6042SHugh Dickins 
17834595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
178454af6042SHugh Dickins 	info->swapped--;
178554af6042SHugh Dickins 	shmem_recalc_inode(inode);
17864595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
178727ab7006SHugh Dickins 
178866d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1789da08e9b7SMatthew Wilcox (Oracle) 		folio_mark_accessed(folio);
179066d2f4d2SHugh Dickins 
1791da08e9b7SMatthew Wilcox (Oracle) 	delete_from_swap_cache(&folio->page);
1792da08e9b7SMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
179327ab7006SHugh Dickins 	swap_free(swap);
179427ab7006SHugh Dickins 
1795da08e9b7SMatthew Wilcox (Oracle) 	*foliop = folio;
1796c5bf121eSVineeth Remanan Pillai 	return 0;
1797c5bf121eSVineeth Remanan Pillai failed:
1798c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1799c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
18006cec2b95SMiaohe Lin 	if (error == -EIO)
18016cec2b95SMiaohe Lin 		shmem_set_folio_swapin_error(inode, index, folio, swap);
1802c5bf121eSVineeth Remanan Pillai unlock:
1803da08e9b7SMatthew Wilcox (Oracle) 	if (folio) {
1804da08e9b7SMatthew Wilcox (Oracle) 		folio_unlock(folio);
1805da08e9b7SMatthew Wilcox (Oracle) 		folio_put(folio);
1806c5bf121eSVineeth Remanan Pillai 	}
1807c5bf121eSVineeth Remanan Pillai 
1808c5bf121eSVineeth Remanan Pillai 	return error;
1809c5bf121eSVineeth Remanan Pillai }
1810c5bf121eSVineeth Remanan Pillai 
1811c5bf121eSVineeth Remanan Pillai /*
1812c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1813c5bf121eSVineeth Remanan Pillai  *
1814c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1815c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty since we also free the swap
1816c5bf121eSVineeth Remanan Pillai  * entry since a page cannot live in both the swap and page cache.
1817c5bf121eSVineeth Remanan Pillai  *
1818c949b097SAxel Rasmussen  * vma, vmf, and fault_type are only supplied by shmem_fault:
1819c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1820c5bf121eSVineeth Remanan Pillai  */
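/*
 * Rough meaning of the sgp modes, gathered from the checks below:
 * SGP_READ succeeds on a hole with NULL page (caller zeroes);
 * SGP_NOALLOC fails on a hole with -ENOENT; SGP_CACHE allocates as
 * needed; SGP_WRITE marks the page accessed/referenced and leaves any
 * clearing to the caller; SGP_FALLOC is treated as SGP_WRITE once a
 * fresh page has been allocated and initialized.
 */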
1821c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1822c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1823c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1824c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1825c5bf121eSVineeth Remanan Pillai {
1826c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1827c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1828c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1829c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1830b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
1831c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1832164cc4feSRik van Riel 	gfp_t huge_gfp;
1833c5bf121eSVineeth Remanan Pillai 	int error;
1834c5bf121eSVineeth Remanan Pillai 	int once = 0;
1835c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1836c5bf121eSVineeth Remanan Pillai 
1837c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1838c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1839c5bf121eSVineeth Remanan Pillai repeat:
1840c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1841c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1842c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1843c5bf121eSVineeth Remanan Pillai 	}
1844c5bf121eSVineeth Remanan Pillai 
1845c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
184604f94e3fSDan Schatzberg 	charge_mm = vma ? vma->vm_mm : NULL;
1847c5bf121eSVineeth Remanan Pillai 
1848b1d0ec3aSMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
1849b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio && vma && userfaultfd_minor(vma)) {
1850b1d0ec3aSMatthew Wilcox (Oracle) 		if (!xa_is_value(folio)) {
1851b1d0ec3aSMatthew Wilcox (Oracle) 			folio_unlock(folio);
1852b1d0ec3aSMatthew Wilcox (Oracle) 			folio_put(folio);
1853c949b097SAxel Rasmussen 		}
1854c949b097SAxel Rasmussen 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1855c949b097SAxel Rasmussen 		return 0;
1856c949b097SAxel Rasmussen 	}
1857c949b097SAxel Rasmussen 
1858b1d0ec3aSMatthew Wilcox (Oracle) 	if (xa_is_value(folio)) {
1859da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, index, &folio,
1860c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1861c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1862c5bf121eSVineeth Remanan Pillai 			goto repeat;
1863c5bf121eSVineeth Remanan Pillai 
1864da08e9b7SMatthew Wilcox (Oracle) 		*pagep = &folio->page;
1865c5bf121eSVineeth Remanan Pillai 		return error;
1866c5bf121eSVineeth Remanan Pillai 	}
1867c5bf121eSVineeth Remanan Pillai 
1868b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
1869b1d0ec3aSMatthew Wilcox (Oracle) 		hindex = folio->index;
1870acdd9f8eSHugh Dickins 		if (sgp == SGP_WRITE)
1871b1d0ec3aSMatthew Wilcox (Oracle) 			folio_mark_accessed(folio);
1872b1d0ec3aSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio))
1873acdd9f8eSHugh Dickins 			goto out;
1874acdd9f8eSHugh Dickins 		/* fallocated page */
1875c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1876c5bf121eSVineeth Remanan Pillai 			goto clear;
1877b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
1878b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
1879c5bf121eSVineeth Remanan Pillai 	}
1880c5bf121eSVineeth Remanan Pillai 
1881c5bf121eSVineeth Remanan Pillai 	/*
1882acdd9f8eSHugh Dickins 	 * SGP_READ: succeed on hole, with NULL page, letting caller zero.
1883acdd9f8eSHugh Dickins 	 * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
1884acdd9f8eSHugh Dickins 	 */
1885acdd9f8eSHugh Dickins 	*pagep = NULL;
1886acdd9f8eSHugh Dickins 	if (sgp == SGP_READ)
1887acdd9f8eSHugh Dickins 		return 0;
1888acdd9f8eSHugh Dickins 	if (sgp == SGP_NOALLOC)
1889acdd9f8eSHugh Dickins 		return -ENOENT;
1890acdd9f8eSHugh Dickins 
1891acdd9f8eSHugh Dickins 	/*
1892acdd9f8eSHugh Dickins 	 * Fast cache lookup and swap lookup did not find it: allocate.
1893c5bf121eSVineeth Remanan Pillai 	 */
1894c5bf121eSVineeth Remanan Pillai 
1895cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1896cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1897cfda0526SMike Rapoport 		return 0;
1898cfda0526SMike Rapoport 	}
1899cfda0526SMike Rapoport 
19005e6e5a12SHugh Dickins 	if (!shmem_is_huge(vma, inode, index))
1901800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
190227d80fa2SKees Cook 
1903164cc4feSRik van Riel 	huge_gfp = vma_thp_gfp_mask(vma);
190478cc8cdcSRik van Riel 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1905b1d0ec3aSMatthew Wilcox (Oracle) 	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1906b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1907c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1908b1d0ec3aSMatthew Wilcox (Oracle) 		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
190954af6042SHugh Dickins 	}
1910b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1911779750d2SKirill A. Shutemov 		int retry = 5;
1912c5bf121eSVineeth Remanan Pillai 
1913b1d0ec3aSMatthew Wilcox (Oracle) 		error = PTR_ERR(folio);
1914b1d0ec3aSMatthew Wilcox (Oracle) 		folio = NULL;
1915779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1916c5bf121eSVineeth Remanan Pillai 			goto unlock;
1917779750d2SKirill A. Shutemov 		/*
1918c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1919779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1920779750d2SKirill A. Shutemov 		 */
1921779750d2SKirill A. Shutemov 		while (retry--) {
1922779750d2SKirill A. Shutemov 			int ret;
1923c5bf121eSVineeth Remanan Pillai 
1924779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1925779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1926779750d2SKirill A. Shutemov 				break;
1927779750d2SKirill A. Shutemov 			if (ret)
1928779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1929779750d2SKirill A. Shutemov 		}
1930c5bf121eSVineeth Remanan Pillai 		goto unlock;
1931800d8c63SKirill A. Shutemov 	}
1932800d8c63SKirill A. Shutemov 
1933b1d0ec3aSMatthew Wilcox (Oracle) 	hindex = round_down(index, folio_nr_pages(folio));
1934800d8c63SKirill A. Shutemov 
193566d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1936b1d0ec3aSMatthew Wilcox (Oracle) 		__folio_set_referenced(folio);
193766d2f4d2SHugh Dickins 
1938b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, hindex,
19393fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19403fea5a49SJohannes Weiner 					charge_mm);
19413fea5a49SJohannes Weiner 	if (error)
1942800d8c63SKirill A. Shutemov 		goto unacct;
1943b1d0ec3aSMatthew Wilcox (Oracle) 	folio_add_lru(folio);
194454af6042SHugh Dickins 
19454595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1946b1d0ec3aSMatthew Wilcox (Oracle) 	info->alloced += folio_nr_pages(folio);
1947fa020a2bSAndrew Morton 	inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
194854af6042SHugh Dickins 	shmem_recalc_inode(inode);
19494595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19501635f6a7SHugh Dickins 	alloced = true;
195154af6042SHugh Dickins 
1952b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_pmd_mappable(folio) &&
1953779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1954779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1955779750d2SKirill A. Shutemov 		/*
1956779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1957779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1958779750d2SKirill A. Shutemov 		 */
1959779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1960d041353dSCong Wang 		/*
1961d041353dSCong Wang 		 * list_empty_careful() to defend against unlocked access to
1962d041353dSCong Wang 		 * ->shrink_list in shmem_unused_huge_shrink()
1963d041353dSCong Wang 		 */
1964d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1965779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1966779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1967779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1968779750d2SKirill A. Shutemov 		}
1969779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1970779750d2SKirill A. Shutemov 	}
1971779750d2SKirill A. Shutemov 
1972ec9516fbSHugh Dickins 	/*
19731635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19741635f6a7SHugh Dickins 	 */
19751635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19761635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19771635f6a7SHugh Dickins clear:
19781635f6a7SHugh Dickins 	/*
19791635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19801635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19811635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1982ec9516fbSHugh Dickins 	 */
1983b1d0ec3aSMatthew Wilcox (Oracle) 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
1984b1d0ec3aSMatthew Wilcox (Oracle) 		long i, n = folio_nr_pages(folio);
1985800d8c63SKirill A. Shutemov 
1986b1d0ec3aSMatthew Wilcox (Oracle) 		for (i = 0; i < n; i++)
1987b1d0ec3aSMatthew Wilcox (Oracle) 			clear_highpage(folio_page(folio, i));
1988b1d0ec3aSMatthew Wilcox (Oracle) 		flush_dcache_folio(folio);
1989b1d0ec3aSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
1990ec9516fbSHugh Dickins 	}
1991bde05d1cSHugh Dickins 
199254af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
199375edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
199409cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1995267a4c76SHugh Dickins 		if (alloced) {
1996b1d0ec3aSMatthew Wilcox (Oracle) 			folio_clear_dirty(folio);
1997b1d0ec3aSMatthew Wilcox (Oracle) 			filemap_remove_folio(folio);
19984595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1999267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
20004595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
2001267a4c76SHugh Dickins 		}
200254af6042SHugh Dickins 		error = -EINVAL;
2003267a4c76SHugh Dickins 		goto unlock;
2004ff36b801SShaohua Li 	}
200563ec1973SMatthew Wilcox (Oracle) out:
2006b1d0ec3aSMatthew Wilcox (Oracle) 	*pagep = folio_page(folio, index - hindex);
200754af6042SHugh Dickins 	return 0;
2008d00806b1SNick Piggin 
2009d0217ac0SNick Piggin 	/*
201054af6042SHugh Dickins 	 * Error recovery.
20111da177e4SLinus Torvalds 	 */
201254af6042SHugh Dickins unacct:
2013b1d0ec3aSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2014800d8c63SKirill A. Shutemov 
2015b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_large(folio)) {
2016b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2017b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
2018800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2019800d8c63SKirill A. Shutemov 	}
2020d1899228SHugh Dickins unlock:
2021b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
2022b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2023b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
202454af6042SHugh Dickins 	}
202554af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20264595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
202754af6042SHugh Dickins 		shmem_recalc_inode(inode);
20284595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20291da177e4SLinus Torvalds 		goto repeat;
2030d8dc74f2SAdrian Bunk 	}
20317f4446eeSMatthew Wilcox 	if (error == -EEXIST)
203254af6042SHugh Dickins 		goto repeat;
203354af6042SHugh Dickins 	return error;
20341da177e4SLinus Torvalds }
20351da177e4SLinus Torvalds 
203610d20bd2SLinus Torvalds /*
203710d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
203810d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
203910d20bd2SLinus Torvalds  * target.
204010d20bd2SLinus Torvalds  */
2041ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
204210d20bd2SLinus Torvalds {
204310d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20442055da97SIngo Molnar 	list_del_init(&wait->entry);
204510d20bd2SLinus Torvalds 	return ret;
204610d20bd2SLinus Torvalds }
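/*
 * The unconditional list_del_init() matters because the wait queue head
 * lives on the stack of the task in shmem_fallocate(): a woken waiter
 * must never stay linked to it, whoever did the waking (see the
 * shmem_falloc_waitq comment further below).
 */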
204710d20bd2SLinus Torvalds 
204820acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20491da177e4SLinus Torvalds {
205011bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2051496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20529e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
205320acce67SSouptick Joarder 	int err;
205420acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20551da177e4SLinus Torvalds 
2056f00cdc6dSHugh Dickins 	/*
2057f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2058f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
20599608703eSJan Kara 	 * locks writers out with its hold on i_rwsem.  So refrain from
20608e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20618e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20628e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20638e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20648e205f77SHugh Dickins 	 *
20658e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20668e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20678e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20688e205f77SHugh Dickins 	 *
20698e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20709608703eSJan Kara 	 * standard mutex or completion: but we cannot take i_rwsem in fault,
20718e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2072f00cdc6dSHugh Dickins 	 */
2073f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2074f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2075f00cdc6dSHugh Dickins 
2076f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2077f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20788e205f77SHugh Dickins 		if (shmem_falloc &&
20798e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20808e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20818e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20828897c1b1SKirill A. Shutemov 			struct file *fpin;
20838e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
208410d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20858e205f77SHugh Dickins 
20868e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20878897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20888897c1b1SKirill A. Shutemov 			if (fpin)
20898e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20908e205f77SHugh Dickins 
20918e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20928e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20938e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20948e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20958e205f77SHugh Dickins 			schedule();
20968e205f77SHugh Dickins 
20978e205f77SHugh Dickins 			/*
20988e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20998e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
21008e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
21018e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
21028e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
21038e205f77SHugh Dickins 			 * though i_lock is needed lest we race with wake_up_all().
21048e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
21058e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
21068e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21078897c1b1SKirill A. Shutemov 
21088897c1b1SKirill A. Shutemov 			if (fpin)
21098897c1b1SKirill A. Shutemov 				fput(fpin);
21108e205f77SHugh Dickins 			return ret;
2111f00cdc6dSHugh Dickins 		}
21128e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2113f00cdc6dSHugh Dickins 	}
2114f00cdc6dSHugh Dickins 
21155e6e5a12SHugh Dickins 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
2116cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
211720acce67SSouptick Joarder 	if (err)
211820acce67SSouptick Joarder 		return vmf_error(err);
211968da9f05SHugh Dickins 	return ret;
21201da177e4SLinus Torvalds }
21211da177e4SLinus Torvalds 
2122c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2123c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2124c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2125c01d5b30SHugh Dickins {
2126c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2127c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2128c01d5b30SHugh Dickins 	unsigned long addr;
2129c01d5b30SHugh Dickins 	unsigned long offset;
2130c01d5b30SHugh Dickins 	unsigned long inflated_len;
2131c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2132c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2133c01d5b30SHugh Dickins 
2134c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2135c01d5b30SHugh Dickins 		return -ENOMEM;
2136c01d5b30SHugh Dickins 
2137c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2138c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2139c01d5b30SHugh Dickins 
2140396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2141c01d5b30SHugh Dickins 		return addr;
2142c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2143c01d5b30SHugh Dickins 		return addr;
2144c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2145c01d5b30SHugh Dickins 		return addr;
2146c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2147c01d5b30SHugh Dickins 		return addr;
2148c01d5b30SHugh Dickins 
2149c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2150c01d5b30SHugh Dickins 		return addr;
2151c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2152c01d5b30SHugh Dickins 		return addr;
2153c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2154c01d5b30SHugh Dickins 		return addr;
2155c01d5b30SHugh Dickins 	/*
2156c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2157c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
215899158997SKirill A. Shutemov 	 * But if the caller specified an address hint and we allocated the area
215999158997SKirill A. Shutemov 	 * there successfully, respect that as before.
2160c01d5b30SHugh Dickins 	 */
216199158997SKirill A. Shutemov 	if (uaddr == addr)
2162c01d5b30SHugh Dickins 		return addr;
2163c01d5b30SHugh Dickins 
2164c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2165c01d5b30SHugh Dickins 		struct super_block *sb;
2166c01d5b30SHugh Dickins 
2167c01d5b30SHugh Dickins 		if (file) {
2168c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2169c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2170c01d5b30SHugh Dickins 		} else {
2171c01d5b30SHugh Dickins 			/*
2172c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2173c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2174c01d5b30SHugh Dickins 			 */
2175c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2176c01d5b30SHugh Dickins 				return addr;
2177c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2178c01d5b30SHugh Dickins 		}
21793089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2180c01d5b30SHugh Dickins 			return addr;
2181c01d5b30SHugh Dickins 	}
2182c01d5b30SHugh Dickins 
2183c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2184c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2185c01d5b30SHugh Dickins 		return addr;
2186c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2187c01d5b30SHugh Dickins 		return addr;
2188c01d5b30SHugh Dickins 
2189c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2190c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2191c01d5b30SHugh Dickins 		return addr;
2192c01d5b30SHugh Dickins 	if (inflated_len < len)
2193c01d5b30SHugh Dickins 		return addr;
2194c01d5b30SHugh Dickins 
219599158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2196c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2197c01d5b30SHugh Dickins 		return addr;
2198c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2199c01d5b30SHugh Dickins 		return addr;
2200c01d5b30SHugh Dickins 
2201c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2202c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2203c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2204c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2205c01d5b30SHugh Dickins 
2206c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2207c01d5b30SHugh Dickins 		return addr;
2208c01d5b30SHugh Dickins 	return inflated_addr;
2209c01d5b30SHugh Dickins }
2210c01d5b30SHugh Dickins 
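/*
 * Illustrative worked example (not part of the kernel source), assuming
 * 4KiB pages and HPAGE_PMD_SIZE = 2MiB, with offset == 0:
 *
 *	get_area(NULL, uaddr, inflated_len, 0, flags)
 *		returns inflated_addr = 0x7f1234563000
 *	inflated_offset = inflated_addr & 0x1fffff = 0x163000
 *	inflated_addr += offset - inflated_offset  -> 0x7f1234400000
 *	inflated_offset > offset, so += 0x200000   -> 0x7f1234600000
 *
 * The result is 2MiB-aligned, lies at most HPAGE_PMD_SIZE - PAGE_SIZE past
 * the start of the inflated area, and therefore still has room for len
 * bytes, so the mapping can be backed by PMD-sized huge pages.
 */
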
22111da177e4SLinus Torvalds #ifdef CONFIG_NUMA
221241ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22131da177e4SLinus Torvalds {
2214496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
221541ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
2218d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2219d8dc74f2SAdrian Bunk 					  unsigned long addr)
22201da177e4SLinus Torvalds {
2221496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
222241ffe5d5SHugh Dickins 	pgoff_t index;
22231da177e4SLinus Torvalds 
222441ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
222541ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22261da177e4SLinus Torvalds }
22271da177e4SLinus Torvalds #endif
22281da177e4SLinus Torvalds 
2229d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
22301da177e4SLinus Torvalds {
2231496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22321da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22331da177e4SLinus Torvalds 	int retval = -ENOMEM;
22341da177e4SLinus Torvalds 
2235ea0dfeb4SHugh Dickins 	/*
2236ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2237ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2238ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2239ea0dfeb4SHugh Dickins 	 */
22401da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
2241d7c9e99aSAlexey Gladkov 		if (!user_shm_lock(inode->i_size, ucounts))
22421da177e4SLinus Torvalds 			goto out_nomem;
22431da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
224489e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22451da177e4SLinus Torvalds 	}
2246d7c9e99aSAlexey Gladkov 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2247d7c9e99aSAlexey Gladkov 		user_shm_unlock(inode->i_size, ucounts);
22481da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
224989e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22501da177e4SLinus Torvalds 	}
22511da177e4SLinus Torvalds 	retval = 0;
225289e004eaSLee Schermerhorn 
22531da177e4SLinus Torvalds out_nomem:
22541da177e4SLinus Torvalds 	return retval;
22551da177e4SLinus Torvalds }
22561da177e4SLinus Torvalds 
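/*
 * Illustrative userspace sketch (not part of the kernel source): this
 * function is reached via shmctl_do_lock(), so a SysV segment can be
 * pinned as unevictable within the caller's RLIMIT_MEMLOCK:
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *	shmctl(id, SHM_UNLOCK, NULL);
 *
 * SHM_LOCK sets VM_LOCKED in info->flags and marks the whole mapping
 * unevictable; SHM_UNLOCK undoes both.
 */
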
22579b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22581da177e4SLinus Torvalds {
2259ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
226022247efdSPeter Xu 	int ret;
2261ab3948f5SJoel Fernandes (Google) 
226222247efdSPeter Xu 	ret = seal_check_future_write(info->seals, vma);
226322247efdSPeter Xu 	if (ret)
226422247efdSPeter Xu 		return ret;
2265ab3948f5SJoel Fernandes (Google) 
226651b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
226751b0bff2SCatalin Marinas 	vma->vm_flags |= VM_MTE_ALLOWED;
226851b0bff2SCatalin Marinas 
22691da177e4SLinus Torvalds 	file_accessed(file);
22701da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
22711da177e4SLinus Torvalds 	return 0;
22721da177e4SLinus Torvalds }
22731da177e4SLinus Torvalds 
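/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * seal_check_future_write() call above is what gives F_SEAL_FUTURE_WRITE
 * its effect at mmap() time on a memfd:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mmap() fails with EPERM; a read-only MAP_SHARED mapping still
 * succeeds, but with VM_MAYWRITE cleared so it can never be mprotect()ed
 * writable.
 */
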
2274454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
227509208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22761da177e4SLinus Torvalds {
22771da177e4SLinus Torvalds 	struct inode *inode;
22781da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22791da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2280e809d5f0SChris Down 	ino_t ino;
22811da177e4SLinus Torvalds 
2282e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
22831da177e4SLinus Torvalds 		return NULL;
22841da177e4SLinus Torvalds 
22851da177e4SLinus Torvalds 	inode = new_inode(sb);
22861da177e4SLinus Torvalds 	if (inode) {
2287e809d5f0SChris Down 		inode->i_ino = ino;
228821cb47beSChristian Brauner 		inode_init_owner(&init_user_ns, inode, dir, mode);
22891da177e4SLinus Torvalds 		inode->i_blocks = 0;
2290078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
229146c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
22921da177e4SLinus Torvalds 		info = SHMEM_I(inode);
22931da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
22941da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2295af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
229640e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
22970b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2298f7cd16a5SXavier Roche 		info->i_crtime = inode->i_mtime;
2299779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23001da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
230138f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
230272c04902SAl Viro 		cache_no_acl(inode);
2303ff36da69SMatthew Wilcox (Oracle) 		mapping_set_large_folios(inode->i_mapping);
23041da177e4SLinus Torvalds 
23051da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23061da177e4SLinus Torvalds 		default:
230739f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23081da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23091da177e4SLinus Torvalds 			break;
23101da177e4SLinus Torvalds 		case S_IFREG:
231114fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23121da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23131da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
231471fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
231571fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23161da177e4SLinus Torvalds 			break;
23171da177e4SLinus Torvalds 		case S_IFDIR:
2318d8c76e6fSDave Hansen 			inc_nlink(inode);
23191da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23201da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23211da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23221da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23231da177e4SLinus Torvalds 			break;
23241da177e4SLinus Torvalds 		case S_IFLNK:
23251da177e4SLinus Torvalds 			/*
23261da177e4SLinus Torvalds 			 * Must not load anything into the rbtree:
23271da177e4SLinus Torvalds 			 * mpol_free_shared_policy() will not be called.
23281da177e4SLinus Torvalds 			 */
232971fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23301da177e4SLinus Torvalds 			break;
23311da177e4SLinus Torvalds 		}
2332b45d71fbSJoel Fernandes (Google) 
2333b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23345b04c689SPavel Emelyanov 	} else
23355b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23361da177e4SLinus Torvalds 	return inode;
23371da177e4SLinus Torvalds }
23381da177e4SLinus Torvalds 
23393460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
23403460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23414c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
23424c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
23434c27fe4cSMike Rapoport 			   unsigned long dst_addr,
23444c27fe4cSMike Rapoport 			   unsigned long src_addr,
23458ee79edfSPeter Xu 			   bool zeropage, bool wp_copy,
23464c27fe4cSMike Rapoport 			   struct page **pagep)
23474c27fe4cSMike Rapoport {
23484c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23494c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23504c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23514c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23524c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23534c27fe4cSMike Rapoport 	void *page_kaddr;
2354b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
23554c27fe4cSMike Rapoport 	struct page *page;
23564c27fe4cSMike Rapoport 	int ret;
23573460f6e5SAxel Rasmussen 	pgoff_t max_off;
23584c27fe4cSMike Rapoport 
23597ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
23607ed9d238SAxel Rasmussen 		/*
23617ed9d238SAxel Rasmussen 		 * We may have got a page, returned -ENOENT triggering a retry,
23627ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the page, to
23637ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
23647ed9d238SAxel Rasmussen 		 */
23657ed9d238SAxel Rasmussen 		if (unlikely(*pagep)) {
23667ed9d238SAxel Rasmussen 			put_page(*pagep);
23677ed9d238SAxel Rasmussen 			*pagep = NULL;
23687ed9d238SAxel Rasmussen 		}
23697d64ae3aSAxel Rasmussen 		return -ENOMEM;
23707ed9d238SAxel Rasmussen 	}
23714c27fe4cSMike Rapoport 
2372cb658a45SAndrea Arcangeli 	if (!*pagep) {
23737d64ae3aSAxel Rasmussen 		ret = -ENOMEM;
23744c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
23754c27fe4cSMike Rapoport 		if (!page)
23760f079694SMike Rapoport 			goto out_unacct_blocks;
23774c27fe4cSMike Rapoport 
23783460f6e5SAxel Rasmussen 		if (!zeropage) {	/* COPY */
23794c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23808d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23818d103963SMike Rapoport 					     (const void __user *)src_addr,
23824c27fe4cSMike Rapoport 					     PAGE_SIZE);
23834c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23844c27fe4cSMike Rapoport 
2385c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
23864c27fe4cSMike Rapoport 			if (unlikely(ret)) {
23874c27fe4cSMike Rapoport 				*pagep = page;
23887d64ae3aSAxel Rasmussen 				ret = -ENOENT;
23894c27fe4cSMike Rapoport 				/* don't free the page */
23907d64ae3aSAxel Rasmussen 				goto out_unacct_blocks;
23914c27fe4cSMike Rapoport 			}
239219b482c2SMuchun Song 
239319b482c2SMuchun Song 			flush_dcache_page(page);
23943460f6e5SAxel Rasmussen 		} else {		/* ZEROPAGE */
239519b482c2SMuchun Song 			clear_user_highpage(page, dst_addr);
23968d103963SMike Rapoport 		}
23974c27fe4cSMike Rapoport 	} else {
23984c27fe4cSMike Rapoport 		page = *pagep;
23994c27fe4cSMike Rapoport 		*pagep = NULL;
24004c27fe4cSMike Rapoport 	}
24014c27fe4cSMike Rapoport 
24023460f6e5SAxel Rasmussen 	VM_BUG_ON(PageLocked(page));
24033460f6e5SAxel Rasmussen 	VM_BUG_ON(PageSwapBacked(page));
24049cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
24059cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2406a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
24079cc90c66SAndrea Arcangeli 
2408e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2409e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
24103460f6e5SAxel Rasmussen 	if (unlikely(pgoff >= max_off))
2411e2a50c1fSAndrea Arcangeli 		goto out_release;
2412e2a50c1fSAndrea Arcangeli 
2413b7dd44a1SMatthew Wilcox (Oracle) 	folio = page_folio(page);
2414b7dd44a1SMatthew Wilcox (Oracle) 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
24153fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24164c27fe4cSMike Rapoport 	if (ret)
24174c27fe4cSMike Rapoport 		goto out_release;
24184c27fe4cSMike Rapoport 
24197d64ae3aSAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
24208ee79edfSPeter Xu 				       page, true, wp_copy);
24217d64ae3aSAxel Rasmussen 	if (ret)
24227d64ae3aSAxel Rasmussen 		goto out_delete_from_cache;
24234c27fe4cSMike Rapoport 
242494b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24254c27fe4cSMike Rapoport 	info->alloced++;
24264c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24274c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
242894b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24294c27fe4cSMike Rapoport 
2430e2a50c1fSAndrea Arcangeli 	unlock_page(page);
24317d64ae3aSAxel Rasmussen 	return 0;
24327d64ae3aSAxel Rasmussen out_delete_from_cache:
2433e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
24344c27fe4cSMike Rapoport out_release:
24359cc90c66SAndrea Arcangeli 	unlock_page(page);
24364c27fe4cSMike Rapoport 	put_page(page);
24374c27fe4cSMike Rapoport out_unacct_blocks:
24380f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24397d64ae3aSAxel Rasmussen 	return ret;
24404c27fe4cSMike Rapoport }
24413460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
24428d103963SMike Rapoport 
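/*
 * Illustrative userspace sketch (not part of the kernel source):
 * shmem_mfill_atomic_pte() is the shmem backend for UFFDIO_COPY and
 * UFFDIO_ZEROPAGE on a range registered with userfaultfd(2).  Here uffd,
 * fault_addr, src_buf and page_size are assumed to have been set up by
 * the caller:
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	struct uffdio_copy copy = {
 *		.dst	= fault_addr & ~(page_size - 1),
 *		.src	= (unsigned long)src_buf,
 *		.len	= page_size,
 *		.mode	= 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * When the copy_from_user() above cannot complete under mmap_lock, the
 * half-initialized page is parked in *pagep and -ENOENT is returned, so
 * the ioctl path can redo the copy outside the lock and retry.
 */
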
24431da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
244492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
244569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24461da177e4SLinus Torvalds 
24476d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
24486d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
24496d9d88d0SJarkko Sakkinen #else
24506d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
24516d9d88d0SJarkko Sakkinen #endif
24526d9d88d0SJarkko Sakkinen 
24531da177e4SLinus Torvalds static int
2454800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
24559d6b0cd7SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
2456800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24571da177e4SLinus Torvalds {
2458800d15a5SNick Piggin 	struct inode *inode = mapping->host;
245940e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
246009cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2461a7605426SYang Shi 	int ret = 0;
246240e041a2SDavid Herrmann 
24639608703eSJan Kara 	/* i_rwsem is held by caller */
2464ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2465ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2466ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
246740e041a2SDavid Herrmann 			return -EPERM;
246840e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
246940e041a2SDavid Herrmann 			return -EPERM;
247040e041a2SDavid Herrmann 	}
247140e041a2SDavid Herrmann 
2472a7605426SYang Shi 	ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
2473a7605426SYang Shi 
2474a7605426SYang Shi 	if (ret)
2475a7605426SYang Shi 		return ret;
2476a7605426SYang Shi 
2477a7605426SYang Shi 	if (PageHWPoison(*pagep)) {
2478a7605426SYang Shi 		unlock_page(*pagep);
2479a7605426SYang Shi 		put_page(*pagep);
2480a7605426SYang Shi 		*pagep = NULL;
2481a7605426SYang Shi 		return -EIO;
2482a7605426SYang Shi 	}
2483a7605426SYang Shi 
2484a7605426SYang Shi 	return 0;
2485800d15a5SNick Piggin }
2486800d15a5SNick Piggin 
2487800d15a5SNick Piggin static int
2488800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2489800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2490800d15a5SNick Piggin 			struct page *page, void *fsdata)
2491800d15a5SNick Piggin {
2492800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2493800d15a5SNick Piggin 
2494800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2495800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2496800d15a5SNick Piggin 
2497ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2498800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2499800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2500800d8c63SKirill A. Shutemov 			int i;
2501800d8c63SKirill A. Shutemov 
2502800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2503800d8c63SKirill A. Shutemov 				if (head + i == page)
2504800d8c63SKirill A. Shutemov 					continue;
2505800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2506800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2507800d8c63SKirill A. Shutemov 			}
2508800d8c63SKirill A. Shutemov 		}
250909cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
251009cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2511ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
251209cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2513ec9516fbSHugh Dickins 		}
2514800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2515ec9516fbSHugh Dickins 	}
2516d3602444SHugh Dickins 	set_page_dirty(page);
25176746aff7SWu Fengguang 	unlock_page(page);
251809cbfeafSKirill A. Shutemov 	put_page(page);
2519d3602444SHugh Dickins 
2520800d15a5SNick Piggin 	return copied;
25211da177e4SLinus Torvalds }
25221da177e4SLinus Torvalds 
25232ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25241da177e4SLinus Torvalds {
25256e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25266e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25271da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
252841ffe5d5SHugh Dickins 	pgoff_t index;
252941ffe5d5SHugh Dickins 	unsigned long offset;
2530f7c1d074SGeert Uytterhoeven 	int error = 0;
2531cb66a7a1SAl Viro 	ssize_t retval = 0;
25326e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2533a0ee5ec5SHugh Dickins 
253409cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
253509cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25361da177e4SLinus Torvalds 
25371da177e4SLinus Torvalds 	for (;;) {
25381da177e4SLinus Torvalds 		struct page *page = NULL;
253941ffe5d5SHugh Dickins 		pgoff_t end_index;
254041ffe5d5SHugh Dickins 		unsigned long nr, ret;
25411da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25421da177e4SLinus Torvalds 
254309cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25441da177e4SLinus Torvalds 		if (index > end_index)
25451da177e4SLinus Torvalds 			break;
25461da177e4SLinus Torvalds 		if (index == end_index) {
254709cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25481da177e4SLinus Torvalds 			if (nr <= offset)
25491da177e4SLinus Torvalds 				break;
25501da177e4SLinus Torvalds 		}
25511da177e4SLinus Torvalds 
255256a8c8ebSHugh Dickins 		error = shmem_getpage(inode, index, &page, SGP_READ);
25536e58e79dSAl Viro 		if (error) {
25546e58e79dSAl Viro 			if (error == -EINVAL)
25556e58e79dSAl Viro 				error = 0;
25561da177e4SLinus Torvalds 			break;
25571da177e4SLinus Torvalds 		}
255875edd345SHugh Dickins 		if (page) {
2559d3602444SHugh Dickins 			unlock_page(page);
2560a7605426SYang Shi 
2561a7605426SYang Shi 			if (PageHWPoison(page)) {
2562a7605426SYang Shi 				put_page(page);
2563a7605426SYang Shi 				error = -EIO;
2564a7605426SYang Shi 				break;
2565a7605426SYang Shi 			}
256675edd345SHugh Dickins 		}
25671da177e4SLinus Torvalds 
25681da177e4SLinus Torvalds 		/*
25691da177e4SLinus Torvalds 		 * We must re-evaluate i_size after getting the page, since reads
25709608703eSJan Kara 		 * (unlike writes) are called without i_rwsem protection against truncate
25711da177e4SLinus Torvalds 		 */
257209cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
25731da177e4SLinus Torvalds 		i_size = i_size_read(inode);
257409cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25751da177e4SLinus Torvalds 		if (index == end_index) {
257609cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25771da177e4SLinus Torvalds 			if (nr <= offset) {
25781da177e4SLinus Torvalds 				if (page)
257909cbfeafSKirill A. Shutemov 					put_page(page);
25801da177e4SLinus Torvalds 				break;
25811da177e4SLinus Torvalds 			}
25821da177e4SLinus Torvalds 		}
25831da177e4SLinus Torvalds 		nr -= offset;
25841da177e4SLinus Torvalds 
25851da177e4SLinus Torvalds 		if (page) {
25861da177e4SLinus Torvalds 			/*
25871da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
25881da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
25891da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
25901da177e4SLinus Torvalds 			 */
25911da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
25921da177e4SLinus Torvalds 				flush_dcache_page(page);
25931da177e4SLinus Torvalds 			/*
25941da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
25951da177e4SLinus Torvalds 			 */
25961da177e4SLinus Torvalds 			if (!offset)
25971da177e4SLinus Torvalds 				mark_page_accessed(page);
25981da177e4SLinus Torvalds 			/*
25991da177e4SLinus Torvalds 			 * Ok, we have the page, and it's up-to-date, so
26001da177e4SLinus Torvalds 			 * now we can copy it to user space...
26011da177e4SLinus Torvalds 			 */
26022ba5bbedSAl Viro 			ret = copy_page_to_iter(page, offset, nr, to);
26031bdec44bSHugh Dickins 			put_page(page);
26041bdec44bSHugh Dickins 
2605*fcb14cb1SAl Viro 		} else if (user_backed_iter(to)) {
26061bdec44bSHugh Dickins 			/*
26071bdec44bSHugh Dickins 			 * Copy to user tends to be so well optimized, but
26081bdec44bSHugh Dickins 			 * clear_user() not so much, that it is noticeably
26091bdec44bSHugh Dickins 			 * faster to copy the zero page instead of clearing.
26101bdec44bSHugh Dickins 			 */
26111bdec44bSHugh Dickins 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
26121bdec44bSHugh Dickins 		} else {
26131bdec44bSHugh Dickins 			/*
26141bdec44bSHugh Dickins 			 * But submitting the same page twice in a row to
26151bdec44bSHugh Dickins 			 * splice() - or others? - can result in confusion:
26161bdec44bSHugh Dickins 			 * so don't attempt that optimization on pipes etc.
26171bdec44bSHugh Dickins 			 */
26181bdec44bSHugh Dickins 			ret = iov_iter_zero(nr, to);
26191bdec44bSHugh Dickins 		}
26201bdec44bSHugh Dickins 
26216e58e79dSAl Viro 		retval += ret;
26221da177e4SLinus Torvalds 		offset += ret;
262309cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
262409cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26251da177e4SLinus Torvalds 
26262ba5bbedSAl Viro 		if (!iov_iter_count(to))
26271da177e4SLinus Torvalds 			break;
26286e58e79dSAl Viro 		if (ret < nr) {
26296e58e79dSAl Viro 			error = -EFAULT;
26306e58e79dSAl Viro 			break;
26316e58e79dSAl Viro 		}
26321da177e4SLinus Torvalds 		cond_resched();
26331da177e4SLinus Torvalds 	}
26341da177e4SLinus Torvalds 
263509cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26366e58e79dSAl Viro 	file_accessed(file);
26376e58e79dSAl Viro 	return retval ? retval : error;
26381da177e4SLinus Torvalds }
26391da177e4SLinus Torvalds 
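/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * user_backed_iter() branch above means reading a hole in a sparse tmpfs
 * file copies out the zero page instead of allocating and clearing fresh
 * pages:
 *
 *	int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 1 << 20);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * n is 4096 and buf is all zeroes, yet no tmpfs page was instantiated.
 * Non-user-backed destinations (such as splice() to a pipe) take the
 * iov_iter_zero() path instead, to avoid feeding the same page to a pipe
 * twice in a row.
 */
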
2640965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2641220f2ac9SHugh Dickins {
2642220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2643220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2644220f2ac9SHugh Dickins 
2645965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2646965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2647220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
264841139aa4SMatthew Wilcox (Oracle) 	if (offset < 0)
264941139aa4SMatthew Wilcox (Oracle) 		return -ENXIO;
265041139aa4SMatthew Wilcox (Oracle) 
26515955102cSAl Viro 	inode_lock(inode);
26529608703eSJan Kara 	/* We're holding i_rwsem so we can access i_size directly */
265341139aa4SMatthew Wilcox (Oracle) 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2654387aae6fSHugh Dickins 	if (offset >= 0)
265546a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26565955102cSAl Viro 	inode_unlock(inode);
2657220f2ac9SHugh Dickins 	return offset;
2658220f2ac9SHugh Dickins }
2659220f2ac9SHugh Dickins 
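/*
 * Illustrative userspace sketch (not part of the kernel source): since
 * tmpfs answers SEEK_DATA/SEEK_HOLE from the page cache through
 * mapping_seek_hole_data(), a sparse file can be walked without reading
 * it:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * data lands on the first allocated page at or after offset 0, hole on
 * the first hole after that; lseek() fails with ENXIO once the offset
 * passes i_size or no further data exists.
 */
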
266083e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
266183e4fa9cSHugh Dickins 							 loff_t len)
266283e4fa9cSHugh Dickins {
2663496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2664e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
266540e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26661aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2667d144bf62SHugh Dickins 	pgoff_t start, index, end, undo_fallocend;
2668e2d12e22SHugh Dickins 	int error;
266983e4fa9cSHugh Dickins 
267013ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
267113ace4d0SHugh Dickins 		return -EOPNOTSUPP;
267213ace4d0SHugh Dickins 
26735955102cSAl Viro 	inode_lock(inode);
267483e4fa9cSHugh Dickins 
267583e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
267683e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
267783e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
267883e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
26798e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
268083e4fa9cSHugh Dickins 
26819608703eSJan Kara 		/* protected by i_rwsem */
2682ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
268340e041a2SDavid Herrmann 			error = -EPERM;
268440e041a2SDavid Herrmann 			goto out;
268540e041a2SDavid Herrmann 		}
268640e041a2SDavid Herrmann 
26878e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2688aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2689f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2690f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2691f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2692f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2693f00cdc6dSHugh Dickins 
269483e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
269583e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
269683e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
269783e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
269883e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
26998e205f77SHugh Dickins 
27008e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
27018e205f77SHugh Dickins 		inode->i_private = NULL;
27028e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
27032055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
27048e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
270583e4fa9cSHugh Dickins 		error = 0;
27068e205f77SHugh Dickins 		goto out;
270783e4fa9cSHugh Dickins 	}
270883e4fa9cSHugh Dickins 
2709e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2710e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2711e2d12e22SHugh Dickins 	if (error)
2712e2d12e22SHugh Dickins 		goto out;
2713e2d12e22SHugh Dickins 
271440e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
271540e041a2SDavid Herrmann 		error = -EPERM;
271640e041a2SDavid Herrmann 		goto out;
271740e041a2SDavid Herrmann 	}
271840e041a2SDavid Herrmann 
271909cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
272009cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2721e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2722e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2723e2d12e22SHugh Dickins 		error = -ENOSPC;
2724e2d12e22SHugh Dickins 		goto out;
2725e2d12e22SHugh Dickins 	}
2726e2d12e22SHugh Dickins 
27278e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27281aac1400SHugh Dickins 	shmem_falloc.start = start;
27291aac1400SHugh Dickins 	shmem_falloc.next  = start;
27301aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27311aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27321aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27331aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27341aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27351aac1400SHugh Dickins 
2736d144bf62SHugh Dickins 	/*
2737d144bf62SHugh Dickins 	 * info->fallocend is only relevant when huge pages might be
2738d144bf62SHugh Dickins 	 * involved: to prevent split_huge_page() freeing fallocated
2739d144bf62SHugh Dickins 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
2740d144bf62SHugh Dickins 	 */
2741d144bf62SHugh Dickins 	undo_fallocend = info->fallocend;
2742d144bf62SHugh Dickins 	if (info->fallocend < end)
2743d144bf62SHugh Dickins 		info->fallocend = end;
2744d144bf62SHugh Dickins 
2745050dcb5cSHugh Dickins 	for (index = start; index < end; ) {
2746e2d12e22SHugh Dickins 		struct page *page;
2747e2d12e22SHugh Dickins 
2748e2d12e22SHugh Dickins 		/*
2749e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2750e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2751e2d12e22SHugh Dickins 		 */
2752e2d12e22SHugh Dickins 		if (signal_pending(current))
2753e2d12e22SHugh Dickins 			error = -EINTR;
27541aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27551aac1400SHugh Dickins 			error = -ENOMEM;
2756e2d12e22SHugh Dickins 		else
27579e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2758e2d12e22SHugh Dickins 		if (error) {
2759d144bf62SHugh Dickins 			info->fallocend = undo_fallocend;
27601635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
27617f556567SHugh Dickins 			if (index > start) {
27621635f6a7SHugh Dickins 				shmem_undo_range(inode,
276309cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2764b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27657f556567SHugh Dickins 			}
27661aac1400SHugh Dickins 			goto undone;
2767e2d12e22SHugh Dickins 		}
2768e2d12e22SHugh Dickins 
2769050dcb5cSHugh Dickins 		index++;
2770050dcb5cSHugh Dickins 		/*
2771050dcb5cSHugh Dickins 		 * Here is a more important optimization than it appears:
2772050dcb5cSHugh Dickins 		 * a second SGP_FALLOC on the same huge page will clear it,
2773050dcb5cSHugh Dickins 		 * making it PageUptodate and un-undoable if we fail later.
2774050dcb5cSHugh Dickins 		 */
2775050dcb5cSHugh Dickins 		if (PageTransCompound(page)) {
2776050dcb5cSHugh Dickins 			index = round_up(index, HPAGE_PMD_NR);
2777050dcb5cSHugh Dickins 			/* Beware 32-bit wraparound */
2778050dcb5cSHugh Dickins 			if (!index)
2779050dcb5cSHugh Dickins 				index--;
2780050dcb5cSHugh Dickins 		}
2781050dcb5cSHugh Dickins 
2782e2d12e22SHugh Dickins 		/*
27831aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
27841aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
27851aac1400SHugh Dickins 		 */
27861aac1400SHugh Dickins 		if (!PageUptodate(page))
2787050dcb5cSHugh Dickins 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
2788050dcb5cSHugh Dickins 		shmem_falloc.next = index;
27891aac1400SHugh Dickins 
27901aac1400SHugh Dickins 		/*
27911635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
27921635f6a7SHugh Dickins 		 * can be recognized if we need to roll back on error later.
27931635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2794e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2795e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2796e2d12e22SHugh Dickins 		 */
2797e2d12e22SHugh Dickins 		set_page_dirty(page);
2798e2d12e22SHugh Dickins 		unlock_page(page);
279909cbfeafSKirill A. Shutemov 		put_page(page);
2800e2d12e22SHugh Dickins 		cond_resched();
2801e2d12e22SHugh Dickins 	}
2802e2d12e22SHugh Dickins 
2803e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2804e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2805078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
28061aac1400SHugh Dickins undone:
28071aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28081aac1400SHugh Dickins 	inode->i_private = NULL;
28091aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2810e2d12e22SHugh Dickins out:
28115955102cSAl Viro 	inode_unlock(inode);
281283e4fa9cSHugh Dickins 	return error;
281383e4fa9cSHugh Dickins }
281483e4fa9cSHugh Dickins 
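/*
 * Illustrative userspace sketch (not part of the kernel source): the two
 * halves of shmem_fallocate() correspond to the two fallocate(2) usages
 * that tmpfs accepts:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	fallocate(fd, 0, 0, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 *
 * The first call preallocates a megabyte; the second punches out two
 * pages (the VFS insists on FALLOC_FL_KEEP_SIZE alongside PUNCH_HOLE).
 * Faults racing with the punch wait on shmem_falloc_waitq, as described
 * in shmem_fault() above.
 */
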
2815726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
28161da177e4SLinus Torvalds {
2817726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
28181da177e4SLinus Torvalds 
28191da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
282009cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28211da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28220edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28231da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
282441ffe5d5SHugh Dickins 		buf->f_bavail =
282541ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
282641ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
28270edd73b3SHugh Dickins 	}
28280edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
28291da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
28301da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28311da177e4SLinus Torvalds 	}
28321da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
283359cda49eSAmir Goldstein 
283459cda49eSAmir Goldstein 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
283559cda49eSAmir Goldstein 
28361da177e4SLinus Torvalds 	return 0;
28371da177e4SLinus Torvalds }
28381da177e4SLinus Torvalds 
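/*
 * Illustrative userspace sketch (not part of the kernel source): f_type
 * lets callers detect tmpfs, and f_blocks/f_files stay 0 on mounts
 * without max_blocks/max_inodes limits:
 *
 *	#include <sys/vfs.h>
 *	#include <linux/magic.h>
 *
 *	struct statfs st;
 *	if (statfs("/dev/shm", &st) == 0 && st.f_type == TMPFS_MAGIC)
 *		handle_tmpfs(&st);
 *
 * TMPFS_MAGIC is 0x01021994; handle_tmpfs() is a hypothetical caller.
 */
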
28391da177e4SLinus Torvalds /*
28401da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
28411da177e4SLinus Torvalds  */
28421da177e4SLinus Torvalds static int
2843549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2844549c7297SChristian Brauner 	    struct dentry *dentry, umode_t mode, dev_t dev)
28451da177e4SLinus Torvalds {
28460b0a0806SHugh Dickins 	struct inode *inode;
28471da177e4SLinus Torvalds 	int error = -ENOSPC;
28481da177e4SLinus Torvalds 
2849454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
28501da177e4SLinus Torvalds 	if (inode) {
2851feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2852feda821eSChristoph Hellwig 		if (error)
2853feda821eSChristoph Hellwig 			goto out_iput;
28542a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28559d8f13baSMimi Zohar 						     &dentry->d_name,
28566d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2857feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2858feda821eSChristoph Hellwig 			goto out_iput;
285937ec43cdSMimi Zohar 
2860718deb6bSAl Viro 		error = 0;
28611da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2862078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28631da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28641da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28651da177e4SLinus Torvalds 	}
28661da177e4SLinus Torvalds 	return error;
2867feda821eSChristoph Hellwig out_iput:
2868feda821eSChristoph Hellwig 	iput(inode);
2869feda821eSChristoph Hellwig 	return error;
28701da177e4SLinus Torvalds }
28711da177e4SLinus Torvalds 
287260545d0dSAl Viro static int
2873549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2874549c7297SChristian Brauner 	      struct dentry *dentry, umode_t mode)
287560545d0dSAl Viro {
287660545d0dSAl Viro 	struct inode *inode;
287760545d0dSAl Viro 	int error = -ENOSPC;
287860545d0dSAl Viro 
287960545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
288060545d0dSAl Viro 	if (inode) {
288160545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
288260545d0dSAl Viro 						     NULL,
288360545d0dSAl Viro 						     shmem_initxattrs, NULL);
2884feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2885feda821eSChristoph Hellwig 			goto out_iput;
2886feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2887feda821eSChristoph Hellwig 		if (error)
2888feda821eSChristoph Hellwig 			goto out_iput;
288960545d0dSAl Viro 		d_tmpfile(dentry, inode);
289060545d0dSAl Viro 	}
289160545d0dSAl Viro 	return error;
2892feda821eSChristoph Hellwig out_iput:
2893feda821eSChristoph Hellwig 	iput(inode);
2894feda821eSChristoph Hellwig 	return error;
289560545d0dSAl Viro }
289660545d0dSAl Viro 
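/*
 * Illustrative userspace sketch (not part of the kernel source):
 * shmem_tmpfile() backs O_TMPFILE opens on tmpfs.  The inode starts with
 * i_nlink == 0, which is why shmem_link() below skips the extra inode
 * reservation on the first link — the inode was already accounted at
 * creation:
 *
 *	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *	char path[64];
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/named",
 *	       AT_SYMLINK_FOLLOW);
 */
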
2897549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2898549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
28991da177e4SLinus Torvalds {
29001da177e4SLinus Torvalds 	int error;
29011da177e4SLinus Torvalds 
2902549c7297SChristian Brauner 	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2903549c7297SChristian Brauner 				 mode | S_IFDIR, 0)))
29041da177e4SLinus Torvalds 		return error;
2905d8c76e6fSDave Hansen 	inc_nlink(dir);
29061da177e4SLinus Torvalds 	return 0;
29071da177e4SLinus Torvalds }
29081da177e4SLinus Torvalds 
2909549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2910549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
29111da177e4SLinus Torvalds {
2912549c7297SChristian Brauner 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
29131da177e4SLinus Torvalds }
29141da177e4SLinus Torvalds 
29151da177e4SLinus Torvalds /*
29161da177e4SLinus Torvalds  * Link a file..
29171da177e4SLinus Torvalds  */
29181da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
29191da177e4SLinus Torvalds {
292075c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
292129b00e60SDarrick J. Wong 	int ret = 0;
29221da177e4SLinus Torvalds 
29231da177e4SLinus Torvalds 	/*
29241da177e4SLinus Torvalds 	 * No ordinary (disk-based) filesystem counts links as inodes;
29251da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29261da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29271062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29281062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29291da177e4SLinus Torvalds 	 */
29301062af92SDarrick J. Wong 	if (inode->i_nlink) {
2931e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
29325b04c689SPavel Emelyanov 		if (ret)
29335b04c689SPavel Emelyanov 			goto out;
29341062af92SDarrick J. Wong 	}
29351da177e4SLinus Torvalds 
29361da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2937078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2938d8c76e6fSDave Hansen 	inc_nlink(inode);
29397de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29401da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29411da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29425b04c689SPavel Emelyanov out:
29435b04c689SPavel Emelyanov 	return ret;
29441da177e4SLinus Torvalds }
29451da177e4SLinus Torvalds 
29461da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
29471da177e4SLinus Torvalds {
294875c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
29491da177e4SLinus Torvalds 
29505b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
29515b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
29521da177e4SLinus Torvalds 
29531da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2954078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
29559a53c3a7SDave Hansen 	drop_nlink(inode);
29561da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
29571da177e4SLinus Torvalds 	return 0;
29581da177e4SLinus Torvalds }
29591da177e4SLinus Torvalds 
29601da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
29611da177e4SLinus Torvalds {
29621da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29631da177e4SLinus Torvalds 		return -ENOTEMPTY;
29641da177e4SLinus Torvalds 
296575c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29669a53c3a7SDave Hansen 	drop_nlink(dir);
29671da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29681da177e4SLinus Torvalds }
29691da177e4SLinus Torvalds 
2970549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns,
2971549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
297246fdb794SMiklos Szeredi {
297346fdb794SMiklos Szeredi 	struct dentry *whiteout;
297446fdb794SMiklos Szeredi 	int error;
297546fdb794SMiklos Szeredi 
297646fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
297746fdb794SMiklos Szeredi 	if (!whiteout)
297846fdb794SMiklos Szeredi 		return -ENOMEM;
297946fdb794SMiklos Szeredi 
2980549c7297SChristian Brauner 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
298146fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
298246fdb794SMiklos Szeredi 	dput(whiteout);
298346fdb794SMiklos Szeredi 	if (error)
298446fdb794SMiklos Szeredi 		return error;
298546fdb794SMiklos Szeredi 
298646fdb794SMiklos Szeredi 	/*
298746fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
298846fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
298946fdb794SMiklos Szeredi 	 *
299046fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
299146fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
299246fdb794SMiklos Szeredi 	 */
299346fdb794SMiklos Szeredi 	d_rehash(whiteout);
299446fdb794SMiklos Szeredi 	return 0;
299546fdb794SMiklos Szeredi }
299646fdb794SMiklos Szeredi 
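/*
 * Illustrative userspace sketch (not part of the kernel source):
 * whiteouts matter mostly to overlayfs with a tmpfs upper layer, but the
 * flag can be exercised directly:
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *
 *	renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_WHITEOUT);
 *
 * "old" is renamed to "new" and replaced by a 0:0 character device node,
 * created by the shmem_mknod(S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV) call
 * above.
 */
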
29971da177e4SLinus Torvalds /*
29981da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename;
29991da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30001da177e4SLinus Torvalds  * it exists, so that the VFS layer correctly frees it when it
30011da177e4SLinus Torvalds  * gets overwritten.
30021da177e4SLinus Torvalds  */
3003549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns,
3004549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
3005549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
3006549c7297SChristian Brauner 			 unsigned int flags)
30071da177e4SLinus Torvalds {
300875c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30091da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30101da177e4SLinus Torvalds 
301146fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30123b69ff51SMiklos Szeredi 		return -EINVAL;
30133b69ff51SMiklos Szeredi 
301437456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
30156429e463SLorenz Bauer 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
301637456771SMiklos Szeredi 
30171da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
30181da177e4SLinus Torvalds 		return -ENOTEMPTY;
30191da177e4SLinus Torvalds 
302046fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
302146fdb794SMiklos Szeredi 		int error;
302246fdb794SMiklos Szeredi 
3023549c7297SChristian Brauner 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
302446fdb794SMiklos Szeredi 		if (error)
302546fdb794SMiklos Szeredi 			return error;
302646fdb794SMiklos Szeredi 	}
302746fdb794SMiklos Szeredi 
302875c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
30291da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3030b928095bSMiklos Szeredi 		if (they_are_dirs) {
303175c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
30329a53c3a7SDave Hansen 			drop_nlink(old_dir);
3033b928095bSMiklos Szeredi 		}
30341da177e4SLinus Torvalds 	} else if (they_are_dirs) {
30359a53c3a7SDave Hansen 		drop_nlink(old_dir);
3036d8c76e6fSDave Hansen 		inc_nlink(new_dir);
30371da177e4SLinus Torvalds 	}
30381da177e4SLinus Torvalds 
30391da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
30401da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
30411da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
30421da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3043078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30441da177e4SLinus Torvalds 	return 0;
30451da177e4SLinus Torvalds }
30461da177e4SLinus Torvalds 
3047549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3048549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
30491da177e4SLinus Torvalds {
30501da177e4SLinus Torvalds 	int error;
30511da177e4SLinus Torvalds 	int len;
30521da177e4SLinus Torvalds 	struct inode *inode;
30539276aad6SHugh Dickins 	struct page *page;
30541da177e4SLinus Torvalds 
30551da177e4SLinus Torvalds 	len = strlen(symname) + 1;
305609cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30571da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30581da177e4SLinus Torvalds 
30590825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30600825a6f9SJoe Perches 				VM_NORESERVE);
30611da177e4SLinus Torvalds 	if (!inode)
30621da177e4SLinus Torvalds 		return -ENOSPC;
30631da177e4SLinus Torvalds 
30649d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30656d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3066343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3067570bc1c2SStephen Smalley 		iput(inode);
3068570bc1c2SStephen Smalley 		return error;
3069570bc1c2SStephen Smalley 	}
3070570bc1c2SStephen Smalley 
30711da177e4SLinus Torvalds 	inode->i_size = len-1;
307269f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
30733ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
30743ed47db3SAl Viro 		if (!inode->i_link) {
307569f07ec9SHugh Dickins 			iput(inode);
307669f07ec9SHugh Dickins 			return -ENOMEM;
307769f07ec9SHugh Dickins 		}
307869f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
30791da177e4SLinus Torvalds 	} else {
3080e8ecde25SAl Viro 		inode_nohighmem(inode);
30819e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
30821da177e4SLinus Torvalds 		if (error) {
30831da177e4SLinus Torvalds 			iput(inode);
30841da177e4SLinus Torvalds 			return error;
30851da177e4SLinus Torvalds 		}
308614fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
30871da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
308821fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3089ec9516fbSHugh Dickins 		SetPageUptodate(page);
30901da177e4SLinus Torvalds 		set_page_dirty(page);
30916746aff7SWu Fengguang 		unlock_page(page);
309209cbfeafSKirill A. Shutemov 		put_page(page);
30931da177e4SLinus Torvalds 	}
30941da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3095078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
30961da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30971da177e4SLinus Torvalds 	dget(dentry);
30981da177e4SLinus Torvalds 	return 0;
30991da177e4SLinus Torvalds }
31001da177e4SLinus Torvalds 
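/*
 * Illustrative note (not part of the kernel source): targets that fit in
 * SHORT_SYMLINK_LEN are kmemdup()ed straight into inode->i_link and
 * never touch the page cache; longer targets are written into page 0 of
 * the mapping and are read back via shmem_get_link() below.  Both look
 * the same to userspace:
 *
 *	symlink("short-target", "/dev/shm/s");
 *	char buf[PATH_MAX];
 *	ssize_t n = readlink("/dev/shm/s", buf, sizeof(buf));
 */
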
3101fceef393SAl Viro static void shmem_put_link(void *arg)
3102fceef393SAl Viro {
3103fceef393SAl Viro 	mark_page_accessed(arg);
3104fceef393SAl Viro 	put_page(arg);
3105fceef393SAl Viro }
3106fceef393SAl Viro 
31076b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3108fceef393SAl Viro 				  struct inode *inode,
3109fceef393SAl Viro 				  struct delayed_call *done)
31101da177e4SLinus Torvalds {
31111da177e4SLinus Torvalds 	struct page *page = NULL;
31126b255391SAl Viro 	int error;
31136a6c9904SAl Viro 	if (!dentry) {
31146a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
31156a6c9904SAl Viro 		if (!page)
31166b255391SAl Viro 			return ERR_PTR(-ECHILD);
3117a7605426SYang Shi 		if (PageHWPoison(page) ||
3118a7605426SYang Shi 		    !PageUptodate(page)) {
31196a6c9904SAl Viro 			put_page(page);
31206a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31216a6c9904SAl Viro 		}
31226a6c9904SAl Viro 	} else {
31239e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3124680baacbSAl Viro 		if (error)
3125680baacbSAl Viro 			return ERR_PTR(error);
3126a7605426SYang Shi 		if (!page)
3127a7605426SYang Shi 			return ERR_PTR(-ECHILD);
3128a7605426SYang Shi 		if (PageHWPoison(page)) {
3129a7605426SYang Shi 			unlock_page(page);
3130a7605426SYang Shi 			put_page(page);
3131a7605426SYang Shi 			return ERR_PTR(-ECHILD);
3132a7605426SYang Shi 		}
3133d3602444SHugh Dickins 		unlock_page(page);
31341da177e4SLinus Torvalds 	}
3135fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
313621fc61c7SAl Viro 	return page_address(page);
31371da177e4SLinus Torvalds }
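/*
 * Note on the lifetime above: the page reference is not dropped here;
 * the VFS runs the delayed_call, and hence shmem_put_link(), once it
 * has finished walking the returned string.
 */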
31381da177e4SLinus Torvalds 
3139b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3140b09e0fa4SEric Paris /*
3141b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3142b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3143b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3144b09e0fa4SEric Paris  * filesystem level, though.
3145b09e0fa4SEric Paris  */
3146b09e0fa4SEric Paris 
31476d9d88d0SJarkko Sakkinen /*
31486d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
31496d9d88d0SJarkko Sakkinen  */
31506d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
31516d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
31526d9d88d0SJarkko Sakkinen 			    void *fs_info)
31536d9d88d0SJarkko Sakkinen {
31546d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
31556d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
315638f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
31576d9d88d0SJarkko Sakkinen 	size_t len;
31586d9d88d0SJarkko Sakkinen 
31596d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
316038f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
31616d9d88d0SJarkko Sakkinen 		if (!new_xattr)
31626d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31636d9d88d0SJarkko Sakkinen 
31646d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
31656d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
31666d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
31676d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
31683bef735aSChengguang Xu 			kvfree(new_xattr);
31696d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31706d9d88d0SJarkko Sakkinen 		}
31716d9d88d0SJarkko Sakkinen 
31726d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
31736d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
31746d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
31756d9d88d0SJarkko Sakkinen 		       xattr->name, len);
31766d9d88d0SJarkko Sakkinen 
317738f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
31786d9d88d0SJarkko Sakkinen 	}
31796d9d88d0SJarkko Sakkinen 
31806d9d88d0SJarkko Sakkinen 	return 0;
31816d9d88d0SJarkko Sakkinen }
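/*
 * For example, with SELinux active the LSM typically hands in an xattr
 * named "selinux", which is stored above under the composed name
 * "security.selinux" (XATTR_SECURITY_PREFIX plus the LSM's suffix).
 */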
31826d9d88d0SJarkko Sakkinen 
3183aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3184b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3185b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3186aa7c5241SAndreas Gruenbacher {
3187b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3188aa7c5241SAndreas Gruenbacher 
3189aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3190aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3191aa7c5241SAndreas Gruenbacher }
3192aa7c5241SAndreas Gruenbacher 
3193aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3194e65ce2a5SChristian Brauner 				   struct user_namespace *mnt_userns,
319559301226SAl Viro 				   struct dentry *unused, struct inode *inode,
319659301226SAl Viro 				   const char *name, const void *value,
319759301226SAl Viro 				   size_t size, int flags)
3198aa7c5241SAndreas Gruenbacher {
319959301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3200aa7c5241SAndreas Gruenbacher 
3201aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3202a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3203aa7c5241SAndreas Gruenbacher }
3204aa7c5241SAndreas Gruenbacher 
3205aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3206aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3207aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3208aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3209aa7c5241SAndreas Gruenbacher };
3210aa7c5241SAndreas Gruenbacher 
3211aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3212aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3213aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3214aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3215aa7c5241SAndreas Gruenbacher };
3216aa7c5241SAndreas Gruenbacher 
3217b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3218b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3219feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3220feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3221b09e0fa4SEric Paris #endif
3222aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3223aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3224b09e0fa4SEric Paris 	NULL
3225b09e0fa4SEric Paris };
3226b09e0fa4SEric Paris 
3227b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3228b09e0fa4SEric Paris {
322975c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3230786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3231b09e0fa4SEric Paris }
3232b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3233b09e0fa4SEric Paris 
323469f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
3235f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
32366b255391SAl Viro 	.get_link	= simple_get_link,
3237b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3238b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3239b09e0fa4SEric Paris #endif
32401da177e4SLinus Torvalds };
32411da177e4SLinus Torvalds 
324292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
3243f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
32446b255391SAl Viro 	.get_link	= shmem_get_link,
3245b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3246b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
324739f0247dSAndreas Gruenbacher #endif
3248b09e0fa4SEric Paris };
324939f0247dSAndreas Gruenbacher 
325091828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
325191828a40SDavid M. Grimes {
325291828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
325391828a40SDavid M. Grimes }
325491828a40SDavid M. Grimes 
325591828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
325691828a40SDavid M. Grimes {
325791828a40SDavid M. Grimes 	__u32 *fh = vfh;
325891828a40SDavid M. Grimes 	__u64 inum = fh[2];
325991828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
326091828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
326191828a40SDavid M. Grimes }
326291828a40SDavid M. Grimes 
326312ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
326412ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
326512ba780dSAmir Goldstein {
326612ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
326712ba780dSAmir Goldstein 
326812ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
326912ba780dSAmir Goldstein }
327012ba780dSAmir Goldstein 
3272480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3273480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
327491828a40SDavid M. Grimes {
327591828a40SDavid M. Grimes 	struct inode *inode;
3276480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
327735c2a7f4SHugh Dickins 	u64 inum;
327891828a40SDavid M. Grimes 
3279480b116cSChristoph Hellwig 	if (fh_len < 3)
3280480b116cSChristoph Hellwig 		return NULL;
3281480b116cSChristoph Hellwig 
328235c2a7f4SHugh Dickins 	inum = fid->raw[2];
328335c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
328435c2a7f4SHugh Dickins 
3285480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3286480b116cSChristoph Hellwig 			shmem_match, fid->raw);
328791828a40SDavid M. Grimes 	if (inode) {
328812ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
328991828a40SDavid M. Grimes 		iput(inode);
329091828a40SDavid M. Grimes 	}
329191828a40SDavid M. Grimes 
3292480b116cSChristoph Hellwig 	return dentry;
329391828a40SDavid M. Grimes }
329491828a40SDavid M. Grimes 
3295b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3296b0b0382bSAl Viro 				struct inode *parent)
329791828a40SDavid M. Grimes {
32985fe0c237SAneesh Kumar K.V 	if (*len < 3) {
32995fe0c237SAneesh Kumar K.V 		*len = 3;
330094e07a75SNamjae Jeon 		return FILEID_INVALID;
33015fe0c237SAneesh Kumar K.V 	}
330291828a40SDavid M. Grimes 
33031d3382cbSAl Viro 	if (inode_unhashed(inode)) {
330491828a40SDavid M. Grimes 		/*
330591828a40SDavid M. Grimes 		 * Unfortunately insert_inode_hash is not idempotent, and
330691828a40SDavid M. Grimes 		 * since we hash inodes here rather than at creation time,
330791828a40SDavid M. Grimes 		 * we need a lock to ensure we only try to do it once.
330891828a40SDavid M. Grimes 		 */
330991828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
331091828a40SDavid M. Grimes 		spin_lock(&lock);
33111d3382cbSAl Viro 		if (inode_unhashed(inode))
331291828a40SDavid M. Grimes 			__insert_inode_hash(inode,
331391828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
331491828a40SDavid M. Grimes 		spin_unlock(&lock);
331591828a40SDavid M. Grimes 	}
331691828a40SDavid M. Grimes 
331791828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
331891828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
331991828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
332091828a40SDavid M. Grimes 
332191828a40SDavid M. Grimes 	*len = 3;
332291828a40SDavid M. Grimes 	return 1;
332391828a40SDavid M. Grimes }
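/*
 * The resulting file handle is three __u32 words, which shmem_match()
 * above decodes as:
 *
 *	fh[0] = i_generation
 *	fh[1] = low 32 bits of i_ino
 *	fh[2] = high 32 bits of i_ino
 *	inum  = ((__u64)fh[2] << 32) | fh[1]
 */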
332491828a40SDavid M. Grimes 
332539655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
332691828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
332791828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3328480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
332991828a40SDavid M. Grimes };
333091828a40SDavid M. Grimes 
3331626c3920SAl Viro enum shmem_param {
3332626c3920SAl Viro 	Opt_gid,
3333626c3920SAl Viro 	Opt_huge,
3334626c3920SAl Viro 	Opt_mode,
3335626c3920SAl Viro 	Opt_mpol,
3336626c3920SAl Viro 	Opt_nr_blocks,
3337626c3920SAl Viro 	Opt_nr_inodes,
3338626c3920SAl Viro 	Opt_size,
3339626c3920SAl Viro 	Opt_uid,
3340ea3271f7SChris Down 	Opt_inode32,
3341ea3271f7SChris Down 	Opt_inode64,
3342626c3920SAl Viro };
33431da177e4SLinus Torvalds 
33445eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
33452710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
33462710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
33472710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
33482710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
33492710c957SAl Viro 	{}
33502710c957SAl Viro };
33512710c957SAl Viro 
3352d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3353626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
33542710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3355626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3356626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3357626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3358626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3359626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3360626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3361ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3362ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
3363626c3920SAl Viro 	{}
3364626c3920SAl Viro };
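/*
 * A hypothetical mount exercising several of the parameters above:
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,huge=within_size,inode64 \
 *		tmpfs /mnt
 *
 * "size" may be given as a percentage of totalram_pages() (handled in
 * shmem_parse_one() below), "huge" takes one of the strings from
 * shmem_param_enums_huge[], and "inode64" is a bare flag.
 */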
3365626c3920SAl Viro 
3366f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3367626c3920SAl Viro {
3368f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3369626c3920SAl Viro 	struct fs_parse_result result;
3370e04dc423SAl Viro 	unsigned long long size;
3371626c3920SAl Viro 	char *rest;
3372626c3920SAl Viro 	int opt;
3373626c3920SAl Viro 
3374d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3375f3235626SDavid Howells 	if (opt < 0)
3376626c3920SAl Viro 		return opt;
3377626c3920SAl Viro 
3378626c3920SAl Viro 	switch (opt) {
3379626c3920SAl Viro 	case Opt_size:
3380626c3920SAl Viro 		size = memparse(param->string, &rest);
3381e04dc423SAl Viro 		if (*rest == '%') {
3382e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3383e04dc423SAl Viro 			size *= totalram_pages();
3384e04dc423SAl Viro 			do_div(size, 100);
3385e04dc423SAl Viro 			rest++;
3386e04dc423SAl Viro 		}
3387e04dc423SAl Viro 		if (*rest)
3388626c3920SAl Viro 			goto bad_value;
3389e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3390e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3391626c3920SAl Viro 		break;
3392626c3920SAl Viro 	case Opt_nr_blocks:
3393626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
33940c98c8e1SZhaoLong Wang 		if (*rest || ctx->blocks > S64_MAX)
3395626c3920SAl Viro 			goto bad_value;
3396e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3397626c3920SAl Viro 		break;
3398626c3920SAl Viro 	case Opt_nr_inodes:
3399626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3400e04dc423SAl Viro 		if (*rest)
3401626c3920SAl Viro 			goto bad_value;
3402e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3403626c3920SAl Viro 		break;
3404626c3920SAl Viro 	case Opt_mode:
3405626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3406626c3920SAl Viro 		break;
3407626c3920SAl Viro 	case Opt_uid:
3408626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3409e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3410626c3920SAl Viro 			goto bad_value;
3411626c3920SAl Viro 		break;
3412626c3920SAl Viro 	case Opt_gid:
3413626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3414e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3415626c3920SAl Viro 			goto bad_value;
3416626c3920SAl Viro 		break;
3417626c3920SAl Viro 	case Opt_huge:
3418626c3920SAl Viro 		ctx->huge = result.uint_32;
3419626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3420396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3421626c3920SAl Viro 		      has_transparent_hugepage()))
3422626c3920SAl Viro 			goto unsupported_parameter;
3423e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3424626c3920SAl Viro 		break;
3425626c3920SAl Viro 	case Opt_mpol:
3426626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3427e04dc423SAl Viro 			mpol_put(ctx->mpol);
3428e04dc423SAl Viro 			ctx->mpol = NULL;
3429626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3430626c3920SAl Viro 				goto bad_value;
3431626c3920SAl Viro 			break;
3432626c3920SAl Viro 		}
3433626c3920SAl Viro 		goto unsupported_parameter;
3434ea3271f7SChris Down 	case Opt_inode32:
3435ea3271f7SChris Down 		ctx->full_inums = false;
3436ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3437ea3271f7SChris Down 		break;
3438ea3271f7SChris Down 	case Opt_inode64:
3439ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3440ea3271f7SChris Down 			return invalfc(fc,
3441ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel");
3442ea3271f7SChris Down 		}
3443ea3271f7SChris Down 		ctx->full_inums = true;
3444ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3445ea3271f7SChris Down 		break;
3446e04dc423SAl Viro 	}
3447e04dc423SAl Viro 	return 0;
3448e04dc423SAl Viro 
3449626c3920SAl Viro unsupported_parameter:
3450f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3451626c3920SAl Viro bad_value:
3452f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3453e04dc423SAl Viro }
3454e04dc423SAl Viro 
3455f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3456e04dc423SAl Viro {
3457f3235626SDavid Howells 	char *options = data;
3458f3235626SDavid Howells 
345933f37c64SAl Viro 	if (options) {
346033f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
346133f37c64SAl Viro 		if (err)
346233f37c64SAl Viro 			return err;
346333f37c64SAl Viro 	}
346433f37c64SAl Viro 
3465b00dc3adSHugh Dickins 	while (options != NULL) {
3466626c3920SAl Viro 		char *this_char = options;
3467b00dc3adSHugh Dickins 		for (;;) {
3468b00dc3adSHugh Dickins 			/*
3469b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3470b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3471b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3472b00dc3adSHugh Dickins 			 */
3473b00dc3adSHugh Dickins 			options = strchr(options, ',');
3474b00dc3adSHugh Dickins 			if (options == NULL)
3475b00dc3adSHugh Dickins 				break;
3476b00dc3adSHugh Dickins 			options++;
3477b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3478b00dc3adSHugh Dickins 				options[-1] = '\0';
3479b00dc3adSHugh Dickins 				break;
3480b00dc3adSHugh Dickins 			}
3481b00dc3adSHugh Dickins 		}
3482626c3920SAl Viro 		if (*this_char) {
3483626c3920SAl Viro 			char *value = strchr(this_char, '=');
3484f3235626SDavid Howells 			size_t len = 0;
3485626c3920SAl Viro 			int err;
3486626c3920SAl Viro 
3487626c3920SAl Viro 			if (value) {
3488626c3920SAl Viro 				*value++ = '\0';
3489f3235626SDavid Howells 				len = strlen(value);
34901da177e4SLinus Torvalds 			}
3491f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3492f3235626SDavid Howells 			if (err < 0)
3493f3235626SDavid Howells 				return err;
34941da177e4SLinus Torvalds 		}
3495626c3920SAl Viro 	}
34961da177e4SLinus Torvalds 	return 0;
34971da177e4SLinus Torvalds }
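/*
 * Example of the splitting above: given "size=1g,mpol=bind:0,2,uid=100",
 * the ',' before '2' is followed by a digit, so it is left in place and
 * "mpol=bind:0,2" reaches vfs_parse_fs_string() as a single option;
 * the ',' before "uid" then terminates it as usual.
 */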
34981da177e4SLinus Torvalds 
3499f3235626SDavid Howells /*
3500f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3501f3235626SDavid Howells  *
3502f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3503f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3504f3235626SDavid Howells  * that case we have no record of how much is already in use.
3505f3235626SDavid Howells  */
3506f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
35071da177e4SLinus Torvalds {
3508f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3509f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
35100edd73b3SHugh Dickins 	unsigned long inodes;
3511bf11b9a8SSebastian Andrzej Siewior 	struct mempolicy *mpol = NULL;
3512f3235626SDavid Howells 	const char *err;
35130edd73b3SHugh Dickins 
3514bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock(&sbinfo->stat_lock);
35150edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
35160c98c8e1SZhaoLong Wang 
3517f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3518f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3519f3235626SDavid Howells 			err = "Cannot retroactively limit size";
35200edd73b3SHugh Dickins 			goto out;
35210b5071ddSAl Viro 		}
3522f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3523f3235626SDavid Howells 					   ctx->blocks) > 0) {
3524f3235626SDavid Howells 			err = "Too small a size for current use";
35250b5071ddSAl Viro 			goto out;
3526f3235626SDavid Howells 		}
3527f3235626SDavid Howells 	}
3528f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3529f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3530f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
35310b5071ddSAl Viro 			goto out;
35320b5071ddSAl Viro 		}
3533f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3534f3235626SDavid Howells 			err = "Too few inodes for current use";
3535f3235626SDavid Howells 			goto out;
3536f3235626SDavid Howells 		}
3537f3235626SDavid Howells 	}
35380edd73b3SHugh Dickins 
3539ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3540ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3541ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3542ea3271f7SChris Down 		goto out;
3543ea3271f7SChris Down 	}
3544ea3271f7SChris Down 
3545f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3546f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3547ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3548ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3549f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3550f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3551f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3552f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3553f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
35540b5071ddSAl Viro 	}
355571fe804bSLee Schermerhorn 
35565f00110fSGreg Thelen 	/*
35575f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
35585f00110fSGreg Thelen 	 */
3559f3235626SDavid Howells 	if (ctx->mpol) {
3560bf11b9a8SSebastian Andrzej Siewior 		mpol = sbinfo->mpol;
3561f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3562f3235626SDavid Howells 		ctx->mpol = NULL;
35635f00110fSGreg Thelen 	}
3564bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3565bf11b9a8SSebastian Andrzej Siewior 	mpol_put(mpol);
3566f3235626SDavid Howells 	return 0;
35670edd73b3SHugh Dickins out:
3568bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3569f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
35701da177e4SLinus Torvalds }
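/*
 * For instance, a hypothetical "mount -o remount,size=2g /mnt" is
 * refused with "Too small a size for current use" if more than 2g is
 * already allocated, and shrinking nr_inodes below the number of
 * inodes currently in use fails the same way.
 */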
3571680d794bSakpm@linux-foundation.org 
357234c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3573680d794bSakpm@linux-foundation.org {
357434c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3575680d794bSakpm@linux-foundation.org 
3576680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3577680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
357809cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3579680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3580680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
35810825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
358209208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
35838751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
35848751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
35858751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
35868751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
35878751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
35888751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3589ea3271f7SChris Down 
3590ea3271f7SChris Down 	/*
3591ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3592ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3593ea3271f7SChris Down 	 * /proc/config.gz (which may not even exist if IKCONFIG_PROC isn't
3594ea3271f7SChris Down 	 * enabled) to confirm 64-bit inums were successfully applied.
3595ea3271f7SChris Down 	 *
3596ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3597ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3598ea3271f7SChris Down 	 * consideration.
3599ea3271f7SChris Down 	 *
3600ea3271f7SChris Down 	 * As such:
3601ea3271f7SChris Down 	 *
3602ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3603ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3604ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3605ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3606ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3607ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3608ea3271f7SChris Down 	 *
3609ea3271f7SChris Down 	 */
3610ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3611ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3612396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
36135a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
36145a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
36155a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
36165a6e75f8SKirill A. Shutemov #endif
361771fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3618680d794bSakpm@linux-foundation.org 	return 0;
3619680d794bSakpm@linux-foundation.org }
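/*
 * With hypothetical values, the resulting /proc/mounts entry might read:
 *
 *	tmpfs /mnt tmpfs rw,size=1048576k,nr_inodes=1048576,inode64 0 0
 *
 * each option appearing only when it differs from the default tested
 * above (except inode{64,32}, per the table in the comment).
 */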
36209183df25SDavid Herrmann 
3621680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
36221da177e4SLinus Torvalds 
36231da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
36241da177e4SLinus Torvalds {
3625602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3626602586a8SHugh Dickins 
3627e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3628602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
362949cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3630602586a8SHugh Dickins 	kfree(sbinfo);
36311da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
36321da177e4SLinus Torvalds }
36331da177e4SLinus Torvalds 
3634f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
36351da177e4SLinus Torvalds {
3636f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
36371da177e4SLinus Torvalds 	struct inode *inode;
36380edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3639680d794bSakpm@linux-foundation.org 
3640680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3641425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3642680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3643680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3644680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3645680d794bSakpm@linux-foundation.org 
3646680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
36471da177e4SLinus Torvalds 
36480edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
36491da177e4SLinus Torvalds 	/*
36501da177e4SLinus Torvalds 	 * By default we only allow half of the physical RAM per
36511da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
36521da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
36531da177e4SLinus Torvalds 	 */
36541751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3655f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3656f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3657f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3658f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3659ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3660ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3661ca4e0519SAl Viro 	} else {
36621751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
36631da177e4SLinus Torvalds 	}
366491828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
36651751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
36660edd73b3SHugh Dickins #else
36671751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
36680edd73b3SHugh Dickins #endif
3669f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3670f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3671e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3672e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3673e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3674e809d5f0SChris Down 			goto failed;
3675e809d5f0SChris Down 	}
3676f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3677f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3678ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3679f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3680f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3681f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3682f3235626SDavid Howells 	ctx->mpol = NULL;
36831da177e4SLinus Torvalds 
3684bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock_init(&sbinfo->stat_lock);
3685908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3686602586a8SHugh Dickins 		goto failed;
3687779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3688779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
36891da177e4SLinus Torvalds 
3690285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
369109cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
369209cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
36931da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
36941da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3695cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3696b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
369739f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3698b09e0fa4SEric Paris #endif
3699b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
37001751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
370139f0247dSAndreas Gruenbacher #endif
37022b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
37030edd73b3SHugh Dickins 
3704454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
37051da177e4SLinus Torvalds 	if (!inode)
37061da177e4SLinus Torvalds 		goto failed;
3707680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3708680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3709318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3710318ceed0SAl Viro 	if (!sb->s_root)
371148fde701SAl Viro 		goto failed;
37121da177e4SLinus Torvalds 	return 0;
37131da177e4SLinus Torvalds 
37141da177e4SLinus Torvalds failed:
37151da177e4SLinus Torvalds 	shmem_put_super(sb);
3716f2b346e4SMiaohe Lin 	return -ENOMEM;
37171da177e4SLinus Torvalds }
37181da177e4SLinus Torvalds 
3719f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3720f3235626SDavid Howells {
3721f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3722f3235626SDavid Howells }
3723f3235626SDavid Howells 
3724f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3725f3235626SDavid Howells {
3726f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3727f3235626SDavid Howells 
3728f3235626SDavid Howells 	if (ctx) {
3729f3235626SDavid Howells 		mpol_put(ctx->mpol);
3730f3235626SDavid Howells 		kfree(ctx);
3731f3235626SDavid Howells 	}
3732f3235626SDavid Howells }
3733f3235626SDavid Howells 
3734f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3735f3235626SDavid Howells 	.free			= shmem_free_fc,
3736f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3737f3235626SDavid Howells #ifdef CONFIG_TMPFS
3738f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3739f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3740f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3741f3235626SDavid Howells #endif
3742f3235626SDavid Howells };
3743f3235626SDavid Howells 
3744fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
37451da177e4SLinus Torvalds 
37461da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
37471da177e4SLinus Torvalds {
374841ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
3749fd60b288SMuchun Song 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
375041ffe5d5SHugh Dickins 	if (!info)
37511da177e4SLinus Torvalds 		return NULL;
375241ffe5d5SHugh Dickins 	return &info->vfs_inode;
37531da177e4SLinus Torvalds }
37541da177e4SLinus Torvalds 
375574b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3756fa0d7e3dSNick Piggin {
375784e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
37583ed47db3SAl Viro 		kfree(inode->i_link);
3759fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3760fa0d7e3dSNick Piggin }
3761fa0d7e3dSNick Piggin 
37621da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
37631da177e4SLinus Torvalds {
376409208d15SAl Viro 	if (S_ISREG(inode->i_mode))
37651da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
37661da177e4SLinus Torvalds }
37671da177e4SLinus Torvalds 
376841ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
37691da177e4SLinus Torvalds {
377041ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
377141ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
37721da177e4SLinus Torvalds }
37731da177e4SLinus Torvalds 
37749a8ec03eSweiping zhang static void shmem_init_inodecache(void)
37751da177e4SLinus Torvalds {
37761da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
37771da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
37785d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
37791da177e4SLinus Torvalds }
37801da177e4SLinus Torvalds 
378141ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
37821da177e4SLinus Torvalds {
37831a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
37841da177e4SLinus Torvalds }
37851da177e4SLinus Torvalds 
3786a7605426SYang Shi /* Keep the page in page cache instead of truncating it */
3787a7605426SYang Shi static int shmem_error_remove_page(struct address_space *mapping,
3788a7605426SYang Shi 				   struct page *page)
3789a7605426SYang Shi {
3790a7605426SYang Shi 	return 0;
3791a7605426SYang Shi }
3792a7605426SYang Shi 
379330e6a51dSHui Su const struct address_space_operations shmem_aops = {
37941da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
379546de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
37961da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3797800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3798800d15a5SNick Piggin 	.write_end	= shmem_write_end,
37991da177e4SLinus Torvalds #endif
38001c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
380154184650SMatthew Wilcox (Oracle) 	.migrate_folio	= migrate_folio,
38021c93923cSAndrew Morton #endif
3803a7605426SYang Shi 	.error_remove_page = shmem_error_remove_page,
38041da177e4SLinus Torvalds };
380530e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
38061da177e4SLinus Torvalds 
380715ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
38081da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3809c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
38101da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3811220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
38122ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
38138174202bSAl Viro 	.write_iter	= generic_file_write_iter,
38141b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
381582c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3816f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
381783e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
38181da177e4SLinus Torvalds #endif
38191da177e4SLinus Torvalds };
38201da177e4SLinus Torvalds 
382192e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
382244a30220SYu Zhao 	.getattr	= shmem_getattr,
382394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3824b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3825b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3826feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3827b09e0fa4SEric Paris #endif
38281da177e4SLinus Torvalds };
38291da177e4SLinus Torvalds 
383092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38311da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3832f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
38331da177e4SLinus Torvalds 	.create		= shmem_create,
38341da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38351da177e4SLinus Torvalds 	.link		= shmem_link,
38361da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
38371da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
38381da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
38391da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
38401da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
38412773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
384260545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
38431da177e4SLinus Torvalds #endif
3844b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3845b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3846b09e0fa4SEric Paris #endif
384739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
384894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3849feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
385039f0247dSAndreas Gruenbacher #endif
385139f0247dSAndreas Gruenbacher };
385239f0247dSAndreas Gruenbacher 
385392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3854f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
3855b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3856b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3857b09e0fa4SEric Paris #endif
385839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
385994c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3860feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
386139f0247dSAndreas Gruenbacher #endif
38621da177e4SLinus Torvalds };
38631da177e4SLinus Torvalds 
3864759b9775SHugh Dickins static const struct super_operations shmem_ops = {
38651da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
386674b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
38671da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
38681da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38691da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3870680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
38711da177e4SLinus Torvalds #endif
38721f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
38731da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
38741da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3875396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3876779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3877779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3878779750d2SKirill A. Shutemov #endif
38791da177e4SLinus Torvalds };
38801da177e4SLinus Torvalds 
3881f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
388254cb8821SNick Piggin 	.fault		= shmem_fault,
3883d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
38841da177e4SLinus Torvalds #ifdef CONFIG_NUMA
38851da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
38861da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
38871da177e4SLinus Torvalds #endif
38881da177e4SLinus Torvalds };
38891da177e4SLinus Torvalds 
3890f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
38911da177e4SLinus Torvalds {
3892f3235626SDavid Howells 	struct shmem_options *ctx;
3893f3235626SDavid Howells 
3894f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3895f3235626SDavid Howells 	if (!ctx)
3896f3235626SDavid Howells 		return -ENOMEM;
3897f3235626SDavid Howells 
3898f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3899f3235626SDavid Howells 	ctx->uid = current_fsuid();
3900f3235626SDavid Howells 	ctx->gid = current_fsgid();
3901f3235626SDavid Howells 
3902f3235626SDavid Howells 	fc->fs_private = ctx;
3903f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3904f3235626SDavid Howells 	return 0;
39051da177e4SLinus Torvalds }
39061da177e4SLinus Torvalds 
390741ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
39081da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
39091da177e4SLinus Torvalds 	.name		= "tmpfs",
3910f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3911f3235626SDavid Howells #ifdef CONFIG_TMPFS
3912d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3913f3235626SDavid Howells #endif
39141da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
3915ff36da69SMatthew Wilcox (Oracle) 	.fs_flags	= FS_USERNS_MOUNT,
39161da177e4SLinus Torvalds };
39171da177e4SLinus Torvalds 
39189096bbe9SMiaohe Lin void __init shmem_init(void)
39191da177e4SLinus Torvalds {
39201da177e4SLinus Torvalds 	int error;
39211da177e4SLinus Torvalds 
39229a8ec03eSweiping zhang 	shmem_init_inodecache();
39231da177e4SLinus Torvalds 
392441ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
39251da177e4SLinus Torvalds 	if (error) {
39261170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
39271da177e4SLinus Torvalds 		goto out2;
39281da177e4SLinus Torvalds 	}
392995dc112aSGreg Kroah-Hartman 
3930ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39311da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39321da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39331170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39341da177e4SLinus Torvalds 		goto out1;
39351da177e4SLinus Torvalds 	}
39365a6e75f8SKirill A. Shutemov 
3937396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3938435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
39395a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39405a6e75f8SKirill A. Shutemov 	else
39415e6e5a12SHugh Dickins 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
39425a6e75f8SKirill A. Shutemov #endif
39439096bbe9SMiaohe Lin 	return;
39441da177e4SLinus Torvalds 
39451da177e4SLinus Torvalds out1:
394641ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
39471da177e4SLinus Torvalds out2:
394841ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
39491da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
39501da177e4SLinus Torvalds }
3951853ac43aSMatt Mackall 
3952396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
39535a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
39545a6e75f8SKirill A. Shutemov 				  struct kobj_attribute *attr, char *buf)
39555a6e75f8SKirill A. Shutemov {
395626083eb6SColin Ian King 	static const int values[] = {
39575a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
39585a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
39595a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
39605a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
39615a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
39625a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
39635a6e75f8SKirill A. Shutemov 	};
396479d4d38aSJoe Perches 	int len = 0;
396579d4d38aSJoe Perches 	int i;
39665a6e75f8SKirill A. Shutemov 
396779d4d38aSJoe Perches 	for (i = 0; i < ARRAY_SIZE(values); i++) {
396879d4d38aSJoe Perches 		len += sysfs_emit_at(buf, len,
396979d4d38aSJoe Perches 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
397079d4d38aSJoe Perches 				     i ? " " : "",
39715a6e75f8SKirill A. Shutemov 				     shmem_format_huge(values[i]));
39725a6e75f8SKirill A. Shutemov 	}
397379d4d38aSJoe Perches 
397479d4d38aSJoe Perches 	len += sysfs_emit_at(buf, len, "\n");
397579d4d38aSJoe Perches 
397679d4d38aSJoe Perches 	return len;
39775a6e75f8SKirill A. Shutemov }
39785a6e75f8SKirill A. Shutemov 
39795a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
39805a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
39815a6e75f8SKirill A. Shutemov {
39825a6e75f8SKirill A. Shutemov 	char tmp[16];
39835a6e75f8SKirill A. Shutemov 	int huge;
39845a6e75f8SKirill A. Shutemov 
39855a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
39865a6e75f8SKirill A. Shutemov 		return -EINVAL;
39875a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
39885a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
39895a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
39905a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
39915a6e75f8SKirill A. Shutemov 
39925a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
39935a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
39945a6e75f8SKirill A. Shutemov 		return -EINVAL;
39955a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
39965a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
39975a6e75f8SKirill A. Shutemov 		return -EINVAL;
39985a6e75f8SKirill A. Shutemov 
39995a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
4000435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
40015a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40025a6e75f8SKirill A. Shutemov 	return count;
40035a6e75f8SKirill A. Shutemov }
40045a6e75f8SKirill A. Shutemov 
40054bfa8adaSMiaohe Lin struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4006396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
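/*
 * The attribute above is exposed via sysfs as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, so (for example):
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * accepts any of the strings that shmem_enabled_show() prints, subject
 * to the has_transparent_hugepage() check in the store handler.
 */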
4007f3f0e1d2SKirill A. Shutemov 
4008853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4009853ac43aSMatt Mackall 
4010853ac43aSMatt Mackall /*
4011853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4012853ac43aSMatt Mackall  *
4013853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4014853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4015853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4016853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4017853ac43aSMatt Mackall  */
4018853ac43aSMatt Mackall 
401941ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4020853ac43aSMatt Mackall 	.name		= "tmpfs",
4021f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4022d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
4023853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40242b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4025853ac43aSMatt Mackall };
4026853ac43aSMatt Mackall 
40279096bbe9SMiaohe Lin void __init shmem_init(void)
4028853ac43aSMatt Mackall {
402941ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4030853ac43aSMatt Mackall 
403141ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4032853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4033853ac43aSMatt Mackall }
4034853ac43aSMatt Mackall 
403510a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
4036853ac43aSMatt Mackall {
4037853ac43aSMatt Mackall 	return 0;
4038853ac43aSMatt Mackall }
4039853ac43aSMatt Mackall 
4040d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
40413f96b79aSHugh Dickins {
40423f96b79aSHugh Dickins 	return 0;
40433f96b79aSHugh Dickins }
40443f96b79aSHugh Dickins 
404524513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
404624513264SHugh Dickins {
404724513264SHugh Dickins }
404824513264SHugh Dickins 
4049c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4050c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4051c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4052c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4053c01d5b30SHugh Dickins {
4054c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4055c01d5b30SHugh Dickins }
4056c01d5b30SHugh Dickins #endif
4057c01d5b30SHugh Dickins 
405841ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
405994c1e62dSHugh Dickins {
406041ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
406194c1e62dSHugh Dickins }
406294c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
406394c1e62dSHugh Dickins 
4064853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
40650b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4066454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
40670b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
40680b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4069853ac43aSMatt Mackall 
4070853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4071853ac43aSMatt Mackall 
4072853ac43aSMatt Mackall /* common code */
40731da177e4SLinus Torvalds 
4074703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4075c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
40761da177e4SLinus Torvalds {
40771da177e4SLinus Torvalds 	struct inode *inode;
407893dec2daSAl Viro 	struct file *res;
40791da177e4SLinus Torvalds 
4080703321b6SMatthew Auld 	if (IS_ERR(mnt))
4081703321b6SMatthew Auld 		return ERR_CAST(mnt);
40821da177e4SLinus Torvalds 
4083285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
40841da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
40851da177e4SLinus Torvalds 
40861da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
40871da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
40881da177e4SLinus Torvalds 
408993dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
409093dec2daSAl Viro 				flags);
4091dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4092dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4093dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4094dac2d1f6SAl Viro 	}
4095c7277090SEric Paris 	inode->i_flags |= i_flags;
40961da177e4SLinus Torvalds 	inode->i_size = size;
40976d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
409826567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
409993dec2daSAl Viro 	if (!IS_ERR(res))
410093dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
41014b42af81SAl Viro 				&shmem_file_operations);
41026b4d0b27SAl Viro 	if (IS_ERR(res))
410393dec2daSAl Viro 		iput(inode);
41046b4d0b27SAl Viro 	return res;
41051da177e4SLinus Torvalds }
4106c7277090SEric Paris 
4107c7277090SEric Paris /**
4108c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4109c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4110c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4111e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4112e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than at the inode.
4113c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4114c7277090SEric Paris  * @size: size to be set for the file
4115c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4116c7277090SEric Paris  */
4117c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4118c7277090SEric Paris {
4119703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4120c7277090SEric Paris }
4121c7277090SEric Paris 
4122c7277090SEric Paris /**
4123c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4124c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4125c7277090SEric Paris  * @size: size to be set for the file
4126c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4127c7277090SEric Paris  */
4128c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4129c7277090SEric Paris {
4130703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4131c7277090SEric Paris }
4132395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
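/*
 * Minimal usage sketch for a hypothetical in-kernel caller:
 *
 *	struct file *filp = shmem_file_setup("my-buffer", SZ_4M, 0);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...access filp->f_mapping, then fput(filp) when finished...
 *
 * The file is created unlinked, so it vanishes when the last reference
 * is dropped.
 */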
41331da177e4SLinus Torvalds 
413446711810SRandy Dunlap /**
4135703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4136703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4137703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4138703321b6SMatthew Auld  * @size: size to be set for the file
4139703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4140703321b6SMatthew Auld  */
4141703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4142703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4143703321b6SMatthew Auld {
4144703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4145703321b6SMatthew Auld }
4146703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4147703321b6SMatthew Auld 
4148703321b6SMatthew Auld /**
41491da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
415045e55300SPeter Collingbourne  * @vma: the vma to be mmapped, as prepared by do_mmap
41511da177e4SLinus Torvalds  */
41521da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41531da177e4SLinus Torvalds {
41541da177e4SLinus Torvalds 	struct file *file;
41551da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
41561da177e4SLinus Torvalds 
415766fc1303SHugh Dickins 	/*
4158c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
415966fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
416066fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
416166fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
416266fc1303SHugh Dickins 	 */
4163703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
41641da177e4SLinus Torvalds 	if (IS_ERR(file))
41651da177e4SLinus Torvalds 		return PTR_ERR(file);
41661da177e4SLinus Torvalds 
41671da177e4SLinus Torvalds 	if (vma->vm_file)
41681da177e4SLinus Torvalds 		fput(vma->vm_file);
41691da177e4SLinus Torvalds 	vma->vm_file = file;
41701da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4171f3f0e1d2SKirill A. Shutemov 
41721da177e4SLinus Torvalds 	return 0;
41731da177e4SLinus Torvalds }
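/*
 * This is the path taken for a shared anonymous mapping, e.g. the
 * hypothetical userspace call:
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * where do_mmap() has no file and needs a shared backing object.
 */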
4174d9d90e5eSHugh Dickins 
4175d9d90e5eSHugh Dickins /**
4176d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4177d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4178d9d90e5eSHugh Dickins  * @index:	the page index
4179d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4180d9d90e5eSHugh Dickins  *
4181d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4182d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
41837e0a1265SMatthew Wilcox (Oracle)  * But read_cache_page_gfp() uses the ->read_folio() method, which does not
4184d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4185d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4186d9d90e5eSHugh Dickins  *
418768da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
418868da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4189d9d90e5eSHugh Dickins  */
4190d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4191d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4192d9d90e5eSHugh Dickins {
419368da9f05SHugh Dickins #ifdef CONFIG_SHMEM
419468da9f05SHugh Dickins 	struct inode *inode = mapping->host;
41959276aad6SHugh Dickins 	struct page *page;
419668da9f05SHugh Dickins 	int error;
419768da9f05SHugh Dickins 
419830e6a51dSHui Su 	BUG_ON(!shmem_mapping(mapping));
41999e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4200cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
420168da9f05SHugh Dickins 	if (error)
4202a7605426SYang Shi 		return ERR_PTR(error);
4203a7605426SYang Shi 
420468da9f05SHugh Dickins 	unlock_page(page);
4205a7605426SYang Shi 	if (PageHWPoison(page)) {
4206a7605426SYang Shi 		put_page(page);
4207a7605426SYang Shi 		return ERR_PTR(-EIO);
4208a7605426SYang Shi 	}
4209a7605426SYang Shi 
421068da9f05SHugh Dickins 	return page;
421168da9f05SHugh Dickins #else
421268da9f05SHugh Dickins 	/*
421368da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
421468da9f05SHugh Dickins 	 */
4215d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
421668da9f05SHugh Dickins #endif
4217d9d90e5eSHugh Dickins }
4218d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
4219
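/*
 * Usage sketch in the style of the gpu drivers mentioned above
 * (hypothetical caller):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the page, then put_page() when done...
 */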