/* xref: /openbmc/linux/mm/shmem.c (revision 923e2f0e7c30db5c1ee5d680050ab781e6c114fb) */
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include "swap.h"

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs with the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

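/*
 * BLOCKS_PER_PAGE is the number of 512-byte sectors per page, the unit
 * used for inode->i_blocks; VM_ACCT(size) is the number of whole pages
 * that must be charged against the memory commit limit to cover @size.
 */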
#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			     struct folio **foliop, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

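/*
 * Adjust the commit charge when a pre-accounted object is resized:
 * charge for the extra pages on growth, return the surplus on shrink.
 * Returns 0 on success, or -ENOMEM if the extra charge is refused.
 */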
static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

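/*
 * Charge @pages to the commit limit and, when the mount was given a
 * max_blocks limit, to the per-superblock used_blocks counter.  Fails
 * with false, undoing any partial charge, if either limit is exceeded.
 */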
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

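/*
 * List of all shmem inodes that have pages out on swap, protected by
 * shmem_swaplist_mutex: shmem_unuse() walks it at swapoff time to bring
 * those pages back in.
 */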
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link, to reserve the space needed
 * by each dentry. However, in that case, no new inode number is needed, since
 * that internally draws from another pool of inode numbers (currently the
 * global get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

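/*
 * shmem_charge() and shmem_uncharge() adjust the accounting when pages
 * are added to or removed from a shmem mapping from outside shmem itself
 * (e.g. khugepaged collapsing small pages into a huge page).
 */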
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

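/*
 * Decide whether a huge page may be used at @index of @inode: only for
 * regular files; never when the global override is "deny" or the mapping
 * has opted out (VM_NOHUGEPAGE, MMF_DISABLE_THP); always when the
 * override is "force"; otherwise according to the per-mount huge=
 * policy, with VM_HUGEPAGE opting in under the "advise" policy.
 */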
bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vma && (vma->vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

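/*
 * Scan sbinfo->shrinklist for inodes whose last huge folio extends beyond
 * i_size, and try to split those folios so the tail pages beyond EOF can
 * be freed.  Inodes with nothing left to gain are dropped from the list;
 * inodes whose folio cannot be locked or split right now are put back for
 * a later attempt.  Returns the number of splits performed, or SHRINK_STOP
 * if the list is empty.
 */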
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (!folio)
			goto drop;

		/* No huge folio at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the folio at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

bool shmem_is_huge(struct vm_area_struct *vma,
		   struct inode *inode, pgoff_t index)
{
	return false;
}

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);
	int error;

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
	VM_BUG_ON(expected && folio_test_large(folio));

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	if (!folio_test_swapcache(folio)) {
		error = mem_cgroup_charge(folio, charge_mm, gfp);
		if (error) {
			if (folio_test_pmd_mappable(folio)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	folio_throttle_swaprate(folio, gfp);

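	/*
	 * Store the folio under xa_lock, retrying with xas_nomem() when node
	 * allocation is needed: -EEXIST if a conflicting entry (other than
	 * the one the caller expected) is already present.
	 */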
	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio)) {
			count_vm_event(THP_FILE_ALLOC);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	folio->mapping = NULL;
	folio_ref_sub(folio, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put(folio);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated pages as holes.
	 */
	folio = __filemap_get_folio(inode->i_mapping, index,
					FGP_ENTRY | FGP_LOCK, 0);
	if (!xa_is_value(folio))
		return folio;
	/*
	 * But read a page back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

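	/*
	 * First pass: quickly truncate the folios and free the swap entries
	 * that lie entirely within the range; folios straddling the range
	 * boundaries are dealt with separately below.
	 */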
	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			index = indices[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, folio);
				continue;
			}
			index += folio_nr_pages(folio) - 1;

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
		index++;
	}

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio->index + folio_nr_pages(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

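	/*
	 * Final pass: sweep whatever is left, retrying an index when a
	 * racing swap-in or swap-out replaced the entry under us; a full
	 * truncation keeps restarting from the top until everything is gone.
	 */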
973bda97eabSHugh Dickins 	index = start;
974b1a36650SHugh Dickins 	while (index < end) {
975bda97eabSHugh Dickins 		cond_resched();
9760cd6144aSJohannes Weiner 
9770e499ed3SMatthew Wilcox (Oracle) 		if (!find_get_entries(mapping, index, end - 1, &fbatch,
978cf2039afSMatthew Wilcox (Oracle) 				indices)) {
979b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
980b1a36650SHugh Dickins 			if (index == start || end != -1)
981bda97eabSHugh Dickins 				break;
982b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
983bda97eabSHugh Dickins 			index = start;
984bda97eabSHugh Dickins 			continue;
985bda97eabSHugh Dickins 		}
9860e499ed3SMatthew Wilcox (Oracle) 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
987b9a8a419SMatthew Wilcox (Oracle) 			folio = fbatch.folios[i];
988bda97eabSHugh Dickins 
9897a5d0fbbSHugh Dickins 			index = indices[i];
9900e499ed3SMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
9911635f6a7SHugh Dickins 				if (unfalloc)
9921635f6a7SHugh Dickins 					continue;
9930e499ed3SMatthew Wilcox (Oracle) 				if (shmem_free_swap(mapping, index, folio)) {
994b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
995b1a36650SHugh Dickins 					index--;
996b1a36650SHugh Dickins 					break;
997b1a36650SHugh Dickins 				}
998b1a36650SHugh Dickins 				nr_swaps_freed++;
9997a5d0fbbSHugh Dickins 				continue;
10007a5d0fbbSHugh Dickins 			}
10017a5d0fbbSHugh Dickins 
10020e499ed3SMatthew Wilcox (Oracle) 			folio_lock(folio);
1003800d8c63SKirill A. Shutemov 
10040e499ed3SMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio)) {
10050e499ed3SMatthew Wilcox (Oracle) 				if (folio_mapping(folio) != mapping) {
1006b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
10070e499ed3SMatthew Wilcox (Oracle) 					folio_unlock(folio);
1008b1a36650SHugh Dickins 					index--;
1009b1a36650SHugh Dickins 					break;
10107a5d0fbbSHugh Dickins 				}
10110e499ed3SMatthew Wilcox (Oracle) 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
10120e499ed3SMatthew Wilcox (Oracle) 						folio);
10130e499ed3SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
101471725ed1SHugh Dickins 			}
1015b9a8a419SMatthew Wilcox (Oracle) 			index = folio->index + folio_nr_pages(folio) - 1;
10160e499ed3SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1017bda97eabSHugh Dickins 		}
10180e499ed3SMatthew Wilcox (Oracle) 		folio_batch_remove_exceptionals(&fbatch);
10190e499ed3SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
1020bda97eabSHugh Dickins 		index++;
1021bda97eabSHugh Dickins 	}
102294c1e62dSHugh Dickins 
10234595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10247a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10251da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10264595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10271635f6a7SHugh Dickins }
10281da177e4SLinus Torvalds 
10291635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10301635f6a7SHugh Dickins {
10311635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1032078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
10331da177e4SLinus Torvalds }
103494c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
10351da177e4SLinus Torvalds 
1036549c7297SChristian Brauner static int shmem_getattr(struct user_namespace *mnt_userns,
1037549c7297SChristian Brauner 			 const struct path *path, struct kstat *stat,
1038a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
103944a30220SYu Zhao {
1040a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
104144a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
104244a30220SYu Zhao 
1043d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10444595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
104544a30220SYu Zhao 		shmem_recalc_inode(inode);
10464595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1047d0424c42SHugh Dickins 	}
1048e408e695STheodore Ts'o 	if (info->fsflags & FS_APPEND_FL)
1049e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_APPEND;
1050e408e695STheodore Ts'o 	if (info->fsflags & FS_IMMUTABLE_FL)
1051e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_IMMUTABLE;
1052e408e695STheodore Ts'o 	if (info->fsflags & FS_NODUMP_FL)
1053e408e695STheodore Ts'o 		stat->attributes |= STATX_ATTR_NODUMP;
1054e408e695STheodore Ts'o 	stat->attributes_mask |= (STATX_ATTR_APPEND |
1055e408e695STheodore Ts'o 			STATX_ATTR_IMMUTABLE |
1056e408e695STheodore Ts'o 			STATX_ATTR_NODUMP);
10570d56a451SChristian Brauner 	generic_fillattr(&init_user_ns, inode, stat);
105889fdcd26SYang Shi 
1059a7fddc36SHugh Dickins 	if (shmem_is_huge(NULL, inode, 0))
106089fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
106189fdcd26SYang Shi 
1062f7cd16a5SXavier Roche 	if (request_mask & STATX_BTIME) {
1063f7cd16a5SXavier Roche 		stat->result_mask |= STATX_BTIME;
1064f7cd16a5SXavier Roche 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1065f7cd16a5SXavier Roche 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1066f7cd16a5SXavier Roche 	}
1067f7cd16a5SXavier Roche 
106844a30220SYu Zhao 	return 0;
106944a30220SYu Zhao }
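
/*
 * Userspace sketch (illustrative, not part of this file): the birth
 * time filled in above is reachable through statx(2):
 *
 *	struct statx stx;
 *
 *	if (!statx(AT_FDCWD, "/dev/shm/f", 0, STATX_BTIME, &stx) &&
 *	    (stx.stx_mask & STATX_BTIME))
 *		... stx.stx_btime carries info->i_crtime ...
 */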
107044a30220SYu Zhao 
1071549c7297SChristian Brauner static int shmem_setattr(struct user_namespace *mnt_userns,
1072549c7297SChristian Brauner 			 struct dentry *dentry, struct iattr *attr)
10731da177e4SLinus Torvalds {
107475c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
107540e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
10761da177e4SLinus Torvalds 	int error;
10771da177e4SLinus Torvalds 
10782f221d6fSChristian Brauner 	error = setattr_prepare(&init_user_ns, dentry, attr);
1079db78b877SChristoph Hellwig 	if (error)
1080db78b877SChristoph Hellwig 		return error;
1081db78b877SChristoph Hellwig 
108294c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
108394c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
108494c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10853889e6e7Snpiggin@suse.de 
10869608703eSJan Kara 		/* protected by i_rwsem */
108740e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
108840e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
108940e041a2SDavid Herrmann 			return -EPERM;
109040e041a2SDavid Herrmann 
109194c1e62dSHugh Dickins 		if (newsize != oldsize) {
109277142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
109377142517SKonstantin Khlebnikov 					oldsize, newsize);
109477142517SKonstantin Khlebnikov 			if (error)
109577142517SKonstantin Khlebnikov 				return error;
109694c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1097078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
109894c1e62dSHugh Dickins 		}
1099afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
110094c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1101d0424c42SHugh Dickins 			if (oldsize > holebegin)
1102d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1103d0424c42SHugh Dickins 							holebegin, 0, 1);
1104d0424c42SHugh Dickins 			if (info->alloced)
1105d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1106d0424c42SHugh Dickins 							newsize, (loff_t)-1);
110794c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1108d0424c42SHugh Dickins 			if (oldsize > holebegin)
1109d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1110d0424c42SHugh Dickins 							holebegin, 0, 1);
111194c1e62dSHugh Dickins 		}
11121da177e4SLinus Torvalds 	}
11131da177e4SLinus Torvalds 
11142f221d6fSChristian Brauner 	setattr_copy(&init_user_ns, inode, attr);
1115db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1116e65ce2a5SChristian Brauner 		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
11171da177e4SLinus Torvalds 	return error;
11181da177e4SLinus Torvalds }
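
/*
 * Userspace sketch (illustrative): the F_SEAL_SHRINK check above is
 * what makes shrinking a sealed memfd fail:
 *
 *	int fd = memfd_create("x", MFD_ALLOW_SEALING);
 *
 *	ftruncate(fd, 8192);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	ftruncate(fd, 4096);	// fails with EPERM via shmem_setattr()
 */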
11191da177e4SLinus Torvalds 
11201f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11211da177e4SLinus Torvalds {
11221da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1123779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11241da177e4SLinus Torvalds 
112530e6a51dSHui Su 	if (shmem_mapping(inode->i_mapping)) {
11261da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11271da177e4SLinus Torvalds 		inode->i_size = 0;
1128bc786390SHugh Dickins 		mapping_set_exiting(inode->i_mapping);
11293889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1130779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1131779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1132779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1133779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1134779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1135779750d2SKirill A. Shutemov 			}
1136779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1137779750d2SKirill A. Shutemov 		}
1138af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1139af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1140af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1141af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1142cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1143af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1144af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11451da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1146cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11471da177e4SLinus Torvalds 		}
11483ed47db3SAl Viro 	}
1149b09e0fa4SEric Paris 
115038f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11510f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11525b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1153dbd5768fSJan Kara 	clear_inode(inode);
11541da177e4SLinus Torvalds }
11551da177e4SLinus Torvalds 
1156b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1157da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t start, struct folio_batch *fbatch,
1158da08e9b7SMatthew Wilcox (Oracle) 				   pgoff_t *indices, unsigned int type)
1159478922e2SMatthew Wilcox {
1160b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1161da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio;
116287039546SHugh Dickins 	swp_entry_t entry;
1163478922e2SMatthew Wilcox 
1164478922e2SMatthew Wilcox 	rcu_read_lock();
1165da08e9b7SMatthew Wilcox (Oracle) 	xas_for_each(&xas, folio, ULONG_MAX) {
1166da08e9b7SMatthew Wilcox (Oracle) 		if (xas_retry(&xas, folio))
11675b9c98f3SMike Kravetz 			continue;
1168b56a2d8aSVineeth Remanan Pillai 
1169da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1170478922e2SMatthew Wilcox 			continue;
1171b56a2d8aSVineeth Remanan Pillai 
1172da08e9b7SMatthew Wilcox (Oracle) 		entry = radix_to_swp_entry(folio);
11736cec2b95SMiaohe Lin 		/*
11746cec2b95SMiaohe Lin 		 * Swapin error entries can be found in the mapping, but they are
11756cec2b95SMiaohe Lin 		 * deliberately ignored here: nothing more can be done for them.
11766cec2b95SMiaohe Lin 		 */
117787039546SHugh Dickins 		if (swp_type(entry) != type)
1178b56a2d8aSVineeth Remanan Pillai 			continue;
1179b56a2d8aSVineeth Remanan Pillai 
1180e384200eSHugh Dickins 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1181da08e9b7SMatthew Wilcox (Oracle) 		if (!folio_batch_add(fbatch, folio))
1182da08e9b7SMatthew Wilcox (Oracle) 			break;
1183b56a2d8aSVineeth Remanan Pillai 
1184b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1185e21a2955SMatthew Wilcox 			xas_pause(&xas);
1186478922e2SMatthew Wilcox 			cond_resched_rcu();
1187478922e2SMatthew Wilcox 		}
1188b56a2d8aSVineeth Remanan Pillai 	}
1189478922e2SMatthew Wilcox 	rcu_read_unlock();
1190e21a2955SMatthew Wilcox 
1191da08e9b7SMatthew Wilcox (Oracle) 	return xas.xa_index;
1192b56a2d8aSVineeth Remanan Pillai }
1193b56a2d8aSVineeth Remanan Pillai 
1194b56a2d8aSVineeth Remanan Pillai /*
1195b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1196b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1197b56a2d8aSVineeth Remanan Pillai  */
1198da08e9b7SMatthew Wilcox (Oracle) static int shmem_unuse_swap_entries(struct inode *inode,
1199da08e9b7SMatthew Wilcox (Oracle) 		struct folio_batch *fbatch, pgoff_t *indices)
1200b56a2d8aSVineeth Remanan Pillai {
1201b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1202b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1203b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1204b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1205b56a2d8aSVineeth Remanan Pillai 
1206da08e9b7SMatthew Wilcox (Oracle) 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1207da08e9b7SMatthew Wilcox (Oracle) 		struct folio *folio = fbatch->folios[i];
1208b56a2d8aSVineeth Remanan Pillai 
1209da08e9b7SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
1210b56a2d8aSVineeth Remanan Pillai 			continue;
1211da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, indices[i],
1212da08e9b7SMatthew Wilcox (Oracle) 					  &folio, SGP_CACHE,
1213b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1214b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1215b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1216da08e9b7SMatthew Wilcox (Oracle) 			folio_unlock(folio);
1217da08e9b7SMatthew Wilcox (Oracle) 			folio_put(folio);
1218b56a2d8aSVineeth Remanan Pillai 			ret++;
1219b56a2d8aSVineeth Remanan Pillai 		}
1220b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1221b56a2d8aSVineeth Remanan Pillai 			break;
1222b56a2d8aSVineeth Remanan Pillai 		error = 0;
1223b56a2d8aSVineeth Remanan Pillai 	}
1224b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1225478922e2SMatthew Wilcox }
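
/*
 * Worked example (illustrative): if a batch holds 16 swap entries, 3
 * swap in cleanly and a 4th hits -ENOMEM, the loop above breaks and
 * -ENOMEM is returned; any other per-entry error is cleared, so the
 * caller otherwise sees only the count of pages brought in.
 */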
1226478922e2SMatthew Wilcox 
122746f65ec1SHugh Dickins /*
122846f65ec1SHugh Dickins  * If swap is found in the inode, free it and move the page from swapcache to filecache.
122946f65ec1SHugh Dickins  */
123010a9c496SChristoph Hellwig static int shmem_unuse_inode(struct inode *inode, unsigned int type)
12311da177e4SLinus Torvalds {
1232b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1233b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1234da08e9b7SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
1235b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1236b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12371da177e4SLinus Torvalds 
1238b56a2d8aSVineeth Remanan Pillai 	do {
1239da08e9b7SMatthew Wilcox (Oracle) 		folio_batch_init(&fbatch);
1240da08e9b7SMatthew Wilcox (Oracle) 		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1241da08e9b7SMatthew Wilcox (Oracle) 		if (folio_batch_count(&fbatch) == 0) {
1242b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1243778dd893SHugh Dickins 			break;
1244b56a2d8aSVineeth Remanan Pillai 		}
1245b56a2d8aSVineeth Remanan Pillai 
1246da08e9b7SMatthew Wilcox (Oracle) 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1247b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1248b56a2d8aSVineeth Remanan Pillai 			break;
1249b56a2d8aSVineeth Remanan Pillai 
1250da08e9b7SMatthew Wilcox (Oracle) 		start = indices[folio_batch_count(&fbatch) - 1];
1251b56a2d8aSVineeth Remanan Pillai 	} while (true);
1252b56a2d8aSVineeth Remanan Pillai 
1253b56a2d8aSVineeth Remanan Pillai 	return ret;
1254b56a2d8aSVineeth Remanan Pillai }
1255b56a2d8aSVineeth Remanan Pillai 
1256b56a2d8aSVineeth Remanan Pillai /*
1257b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1258b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1259b56a2d8aSVineeth Remanan Pillai  * unused.
1260b56a2d8aSVineeth Remanan Pillai  */
126110a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
1262b56a2d8aSVineeth Remanan Pillai {
1263b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1264b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1265b56a2d8aSVineeth Remanan Pillai 
1266b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1267b56a2d8aSVineeth Remanan Pillai 		return 0;
1268b56a2d8aSVineeth Remanan Pillai 
1269b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1270b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1271b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1272b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1273b56a2d8aSVineeth Remanan Pillai 			continue;
1274b56a2d8aSVineeth Remanan Pillai 		}
1275af53d3e9SHugh Dickins 		/*
1276af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1277af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1278af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1279af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1280af53d3e9SHugh Dickins 		 */
1281af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1282b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1283b56a2d8aSVineeth Remanan Pillai 
128410a9c496SChristoph Hellwig 		error = shmem_unuse_inode(&info->vfs_inode, type);
1285b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1286b56a2d8aSVineeth Remanan Pillai 
1287b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1288b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1289b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1290b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1291af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1292af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1293b56a2d8aSVineeth Remanan Pillai 		if (error)
1294b56a2d8aSVineeth Remanan Pillai 			break;
12951da177e4SLinus Torvalds 	}
1296cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1297778dd893SHugh Dickins 
1298778dd893SHugh Dickins 	return error;
12991da177e4SLinus Torvalds }
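
/*
 * Call-site sketch (illustrative): during swapoff(2), try_to_unuse()
 * in mm/swapfile.c is expected to call this as roughly
 *
 *	error = shmem_unuse(type);	// drain tmpfs pages off the device
 *
 * before it walks the remaining (anonymous) swap entries.
 */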
13001da177e4SLinus Torvalds 
13011da177e4SLinus Torvalds /*
13021da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13031da177e4SLinus Torvalds  */
13041da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13051da177e4SLinus Torvalds {
1306e2e3fdc7SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
13071da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13081da177e4SLinus Torvalds 	struct address_space *mapping;
13091da177e4SLinus Torvalds 	struct inode *inode;
13106922c0c7SHugh Dickins 	swp_entry_t swap;
13116922c0c7SHugh Dickins 	pgoff_t index;
13121da177e4SLinus Torvalds 
13131e6decf3SHugh Dickins 	/*
13141e6decf3SHugh Dickins 	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
13151e6decf3SHugh Dickins 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
13161e6decf3SHugh Dickins 	 * and its shmem_writeback() needs them to be split when swapping.
13171e6decf3SHugh Dickins 	 */
1318f530ed0eSMatthew Wilcox (Oracle) 	if (folio_test_large(folio)) {
13191e6decf3SHugh Dickins 		/* Ensure the subpages are still dirty */
1320f530ed0eSMatthew Wilcox (Oracle) 		folio_test_set_dirty(folio);
13211e6decf3SHugh Dickins 		if (split_huge_page(page) < 0)
13221e6decf3SHugh Dickins 			goto redirty;
1323f530ed0eSMatthew Wilcox (Oracle) 		folio = page_folio(page);
1324f530ed0eSMatthew Wilcox (Oracle) 		folio_clear_dirty(folio);
13251e6decf3SHugh Dickins 	}
13261e6decf3SHugh Dickins 
1327f530ed0eSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
1328f530ed0eSMatthew Wilcox (Oracle) 	mapping = folio->mapping;
1329f530ed0eSMatthew Wilcox (Oracle) 	index = folio->index;
13301da177e4SLinus Torvalds 	inode = mapping->host;
13311da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13321da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13331da177e4SLinus Torvalds 		goto redirty;
1334d9fe526aSHugh Dickins 	if (!total_swap_pages)
13351da177e4SLinus Torvalds 		goto redirty;
13361da177e4SLinus Torvalds 
1337d9fe526aSHugh Dickins 	/*
133897b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
133997b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
134097b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
134197b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
134297b713baSChristoph Hellwig 	 * threads or sync.
1343d9fe526aSHugh Dickins 	 */
134448f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
134548f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
134648f170fbSHugh Dickins 		goto redirty;
134748f170fbSHugh Dickins 	}
13481635f6a7SHugh Dickins 
13491635f6a7SHugh Dickins 	/*
13501635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13511635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
1352f530ed0eSMatthew Wilcox (Oracle) 	 * fallocated folio arriving here is now to initialize it and write it.
13531aac1400SHugh Dickins 	 *
1354f530ed0eSMatthew Wilcox (Oracle) 	 * That's okay for a folio already fallocated earlier, but if we have
13551aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
1356f530ed0eSMatthew Wilcox (Oracle) 	 * of this folio in case we have to undo it, and (b) it may not be a
13571aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
1358f530ed0eSMatthew Wilcox (Oracle) 	 * reactivate the folio, and let shmem_fallocate() quit when too many.
13591635f6a7SHugh Dickins 	 */
1360f530ed0eSMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
13611aac1400SHugh Dickins 		if (inode->i_private) {
13621aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13631aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13641aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13651aac1400SHugh Dickins 			if (shmem_falloc &&
13668e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13671aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13681aac1400SHugh Dickins 			    index < shmem_falloc->next)
13691aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13701aac1400SHugh Dickins 			else
13711aac1400SHugh Dickins 				shmem_falloc = NULL;
13721aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
13731aac1400SHugh Dickins 			if (shmem_falloc)
13741aac1400SHugh Dickins 				goto redirty;
13751aac1400SHugh Dickins 		}
1376f530ed0eSMatthew Wilcox (Oracle) 		folio_zero_range(folio, 0, folio_size(folio));
1377f530ed0eSMatthew Wilcox (Oracle) 		flush_dcache_folio(folio);
1378f530ed0eSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
13791635f6a7SHugh Dickins 	}
13801635f6a7SHugh Dickins 
1381e2e3fdc7SMatthew Wilcox (Oracle) 	swap = folio_alloc_swap(folio);
138248f170fbSHugh Dickins 	if (!swap.val)
138348f170fbSHugh Dickins 		goto redirty;
1384d9fe526aSHugh Dickins 
1385b1dea800SHugh Dickins 	/*
1386b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1387f530ed0eSMatthew Wilcox (Oracle) 	 * if it's not already there.  Do it now before the folio is
13886922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1389b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
13906922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
13916922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1392b1dea800SHugh Dickins 	 */
1393b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
139405bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1395b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1396b1dea800SHugh Dickins 
1397a4c366f0SMatthew Wilcox (Oracle) 	if (add_to_swap_cache(folio, swap,
13983852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
13993852f676SJoonsoo Kim 			NULL) == 0) {
14004595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1401267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1402267a4c76SHugh Dickins 		info->swapped++;
14034595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1404267a4c76SHugh Dickins 
1405aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14064cd400fdSMatthew Wilcox (Oracle) 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
14076922c0c7SHugh Dickins 
14086922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1409f530ed0eSMatthew Wilcox (Oracle) 		BUG_ON(folio_mapped(folio));
1410f530ed0eSMatthew Wilcox (Oracle) 		swap_writepage(&folio->page, wbc);
14111da177e4SLinus Torvalds 		return 0;
14121da177e4SLinus Torvalds 	}
14131da177e4SLinus Torvalds 
14146922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
14154081f744SMatthew Wilcox (Oracle) 	put_swap_folio(folio, swap);
14161da177e4SLinus Torvalds redirty:
1417f530ed0eSMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
1418d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1419f530ed0eSMatthew Wilcox (Oracle) 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1420f530ed0eSMatthew Wilcox (Oracle) 	folio_unlock(folio);
1421d9fe526aSHugh Dickins 	return 0;
14221da177e4SLinus Torvalds }
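
/*
 * Contract note (an assumption drawn from the code above): vmscan
 * calls shmem_writepage() with the folio locked and wbc->for_reclaim
 * set; on success the lock passes to swap_writepage(), which unlocks
 * once writeback is under way, while AOP_WRITEPAGE_ACTIVATE hands the
 * folio back still locked for reactivation.
 */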
14231da177e4SLinus Torvalds 
142475edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
142571fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1426680d794bSakpm@linux-foundation.org {
1427680d794bSakpm@linux-foundation.org 	char buffer[64];
1428680d794bSakpm@linux-foundation.org 
142971fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1430095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1431095f1fc4SLee Schermerhorn 
1432a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1433095f1fc4SLee Schermerhorn 
1434095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1435680d794bSakpm@linux-foundation.org }
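
/*
 * e.g. a tmpfs mount with "-o mpol=interleave" reports back through
 * the seq_printf() above as ",mpol=interleave" in /proc/mounts.
 */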
143671fe804bSLee Schermerhorn 
143771fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
143871fe804bSLee Schermerhorn {
143971fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
144071fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
1441bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
144271fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
144371fe804bSLee Schermerhorn 		mpol_get(mpol);
1444bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
144571fe804bSLee Schermerhorn 	}
144671fe804bSLee Schermerhorn 	return mpol;
144771fe804bSLee Schermerhorn }
144875edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
144975edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
145075edd345SHugh Dickins {
145175edd345SHugh Dickins }
145275edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
145375edd345SHugh Dickins {
145475edd345SHugh Dickins 	return NULL;
145575edd345SHugh Dickins }
145675edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
145775edd345SHugh Dickins #ifndef CONFIG_NUMA
145875edd345SHugh Dickins #define vm_policy vm_private_data
145975edd345SHugh Dickins #endif
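
/*
 * Without CONFIG_NUMA, vm_area_struct has no vm_policy member; the
 * alias above lets the pseudo-vma helpers below keep assigning
 * ->vm_policy by reusing the (here unused) vm_private_data slot.
 */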
1460680d794bSakpm@linux-foundation.org 
1461800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1462800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1463800d8c63SKirill A. Shutemov {
1464800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14652c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1466800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1467800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1468800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1469800d8c63SKirill A. Shutemov }
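
/*
 * e.g. (illustrative): inodes with i_ino 5 and 6 map their index-0
 * pages to vm_pgoff 5 and 6, so MPOL_INTERLEAVE spreads them across
 * nodes rather than piling every file's first page on the same node.
 */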
1470800d8c63SKirill A. Shutemov 
1471800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1472800d8c63SKirill A. Shutemov {
1473800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1474800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1475800d8c63SKirill A. Shutemov }
1476800d8c63SKirill A. Shutemov 
14775739a81cSMatthew Wilcox (Oracle) static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
147841ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
14791da177e4SLinus Torvalds {
14801da177e4SLinus Torvalds 	struct vm_area_struct pvma;
148118a2f371SMel Gorman 	struct page *page;
14828c63ca5bSWill Deacon 	struct vm_fault vmf = {
14838c63ca5bSWill Deacon 		.vma = &pvma,
14848c63ca5bSWill Deacon 	};
14851da177e4SLinus Torvalds 
1486800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1487e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1488800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
148918a2f371SMel Gorman 
14905739a81cSMatthew Wilcox (Oracle) 	if (!page)
14915739a81cSMatthew Wilcox (Oracle) 		return NULL;
14925739a81cSMatthew Wilcox (Oracle) 	return page_folio(page);
1493800d8c63SKirill A. Shutemov }
149418a2f371SMel Gorman 
149578cc8cdcSRik van Riel /*
149678cc8cdcSRik van Riel  * Make sure huge_gfp is always more limited than limit_gfp.
149778cc8cdcSRik van Riel  * Some of the flags set permissions, while others set limitations.
149878cc8cdcSRik van Riel  */
149978cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
150078cc8cdcSRik van Riel {
150178cc8cdcSRik van Riel 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
150278cc8cdcSRik van Riel 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1503187df5ddSRik van Riel 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1504187df5ddSRik van Riel 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1505187df5ddSRik van Riel 
1506187df5ddSRik van Riel 	/* Allow allocations only from the originally specified zones. */
1507187df5ddSRik van Riel 	result |= zoneflags;
150878cc8cdcSRik van Riel 
150978cc8cdcSRik van Riel 	/*
151078cc8cdcSRik van Riel 	 * Minimize the result gfp by taking the union with the deny flags,
151178cc8cdcSRik van Riel 	 * and the intersection of the allow flags.
151278cc8cdcSRik van Riel 	 */
151378cc8cdcSRik van Riel 	result |= (limit_gfp & denyflags);
151478cc8cdcSRik van Riel 	result |= (huge_gfp & limit_gfp) & allowflags;
151578cc8cdcSRik van Riel 
151678cc8cdcSRik van Riel 	return result;
151778cc8cdcSRik van Riel }
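
/*
 * Worked example (illustrative): huge_gfp = GFP_TRANSHUGE (which
 * includes __GFP_FS) limited by limit_gfp = GFP_NOFS keeps __GFP_FS
 * only if both masks carry it, so the result drops __GFP_FS: the huge
 * attempt may not recurse into filesystems when the fault could not.
 */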
151878cc8cdcSRik van Riel 
151972827e5cSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1520800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1521800d8c63SKirill A. Shutemov {
1522800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15237b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15247b8d046fSMatthew Wilcox 	pgoff_t hindex;
1525dfe98499SMatthew Wilcox (Oracle) 	struct folio *folio;
1526800d8c63SKirill A. Shutemov 
15274620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15287b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15297b8d046fSMatthew Wilcox 								XA_PRESENT))
1530800d8c63SKirill A. Shutemov 		return NULL;
1531800d8c63SKirill A. Shutemov 
1532800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1533dfe98499SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1534800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1535dfe98499SMatthew Wilcox (Oracle) 	if (!folio)
1536dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
153772827e5cSMatthew Wilcox (Oracle) 	return folio;
153818a2f371SMel Gorman }
153918a2f371SMel Gorman 
15400c023ef5SMatthew Wilcox (Oracle) static struct folio *shmem_alloc_folio(gfp_t gfp,
154118a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
154218a2f371SMel Gorman {
154318a2f371SMel Gorman 	struct vm_area_struct pvma;
15440c023ef5SMatthew Wilcox (Oracle) 	struct folio *folio;
154518a2f371SMel Gorman 
1546800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
15470c023ef5SMatthew Wilcox (Oracle) 	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1548800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
154918a2f371SMel Gorman 
15500c023ef5SMatthew Wilcox (Oracle) 	return folio;
155118a2f371SMel Gorman }
155218a2f371SMel Gorman 
1553b1d0ec3aSMatthew Wilcox (Oracle) static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1554800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1555800d8c63SKirill A. Shutemov {
15560f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
155772827e5cSMatthew Wilcox (Oracle) 	struct folio *folio;
1558800d8c63SKirill A. Shutemov 	int nr;
1559800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1560800d8c63SKirill A. Shutemov 
1561396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1562800d8c63SKirill A. Shutemov 		huge = false;
1563800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1564800d8c63SKirill A. Shutemov 
15650f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1566800d8c63SKirill A. Shutemov 		goto failed;
1567800d8c63SKirill A. Shutemov 
1568800d8c63SKirill A. Shutemov 	if (huge)
156972827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_hugefolio(gfp, info, index);
1570800d8c63SKirill A. Shutemov 	else
157172827e5cSMatthew Wilcox (Oracle) 		folio = shmem_alloc_folio(gfp, info, index);
157272827e5cSMatthew Wilcox (Oracle) 	if (folio) {
157372827e5cSMatthew Wilcox (Oracle) 		__folio_set_locked(folio);
157472827e5cSMatthew Wilcox (Oracle) 		__folio_set_swapbacked(folio);
1575b1d0ec3aSMatthew Wilcox (Oracle) 		return folio;
157675edd345SHugh Dickins 	}
157718a2f371SMel Gorman 
1578800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15790f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1580800d8c63SKirill A. Shutemov failed:
1581800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15821da177e4SLinus Torvalds }
158371fe804bSLee Schermerhorn 
15841da177e4SLinus Torvalds /*
1585bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1586fc26babbSMatthew Wilcox (Oracle)  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1587bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1588bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1589bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1590bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1591bde05d1cSHugh Dickins  *
1592bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1593bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1594bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1595bde05d1cSHugh Dickins  */
1596069d849cSMatthew Wilcox (Oracle) static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1597bde05d1cSHugh Dickins {
1598069d849cSMatthew Wilcox (Oracle) 	return folio_zonenum(folio) > gfp_zone(gfp);
1599bde05d1cSHugh Dickins }
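
/*
 * Worked example (illustrative, assuming a config with ZONE_DMA32):
 * for gfp = GFP_USER | __GFP_DMA32, gfp_zone(gfp) is ZONE_DMA32, so a
 * folio sitting in ZONE_NORMAL compares higher and must be replaced
 * by a copy allocated low enough for the constrained mapping.
 */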
1600bde05d1cSHugh Dickins 
16010d698e25SMatthew Wilcox (Oracle) static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1602bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1603bde05d1cSHugh Dickins {
1604d21bba2bSMatthew Wilcox (Oracle) 	struct folio *old, *new;
1605bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1606c1cb20d4SYu Zhao 	swp_entry_t entry;
1607bde05d1cSHugh Dickins 	pgoff_t swap_index;
1608bde05d1cSHugh Dickins 	int error;
1609bde05d1cSHugh Dickins 
16100d698e25SMatthew Wilcox (Oracle) 	old = *foliop;
1611907ea17eSMatthew Wilcox (Oracle) 	entry = folio_swap_entry(old);
1612c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1613907ea17eSMatthew Wilcox (Oracle) 	swap_mapping = swap_address_space(entry);
1614bde05d1cSHugh Dickins 
1615bde05d1cSHugh Dickins 	/*
1616bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1617bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1618bde05d1cSHugh Dickins 	 */
1619bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1620907ea17eSMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(old), old);
1621907ea17eSMatthew Wilcox (Oracle) 	new = shmem_alloc_folio(gfp, info, index);
1622907ea17eSMatthew Wilcox (Oracle) 	if (!new)
1623bde05d1cSHugh Dickins 		return -ENOMEM;
1624bde05d1cSHugh Dickins 
1625907ea17eSMatthew Wilcox (Oracle) 	folio_get(new);
1626907ea17eSMatthew Wilcox (Oracle) 	folio_copy(new, old);
1627907ea17eSMatthew Wilcox (Oracle) 	flush_dcache_folio(new);
1628bde05d1cSHugh Dickins 
1629907ea17eSMatthew Wilcox (Oracle) 	__folio_set_locked(new);
1630907ea17eSMatthew Wilcox (Oracle) 	__folio_set_swapbacked(new);
1631907ea17eSMatthew Wilcox (Oracle) 	folio_mark_uptodate(new);
1632907ea17eSMatthew Wilcox (Oracle) 	folio_set_swap_entry(new, entry);
1633907ea17eSMatthew Wilcox (Oracle) 	folio_set_swapcache(new);
1634bde05d1cSHugh Dickins 
1635bde05d1cSHugh Dickins 	/*
1636bde05d1cSHugh Dickins 	 * Our caller will very soon move the new folio out of swapcache, but
1637bde05d1cSHugh Dickins 	 * it's a nice clean interface for us to replace the old folio there.
1638bde05d1cSHugh Dickins 	 */
1639b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
1640907ea17eSMatthew Wilcox (Oracle) 	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
16410142ef6cSHugh Dickins 	if (!error) {
1642d21bba2bSMatthew Wilcox (Oracle) 		mem_cgroup_migrate(old, new);
1643907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
1644907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
1645907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
1646907ea17eSMatthew Wilcox (Oracle) 		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
16470142ef6cSHugh Dickins 	}
1648b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1649bde05d1cSHugh Dickins 
16500142ef6cSHugh Dickins 	if (unlikely(error)) {
16510142ef6cSHugh Dickins 		/*
16520142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16530142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16540142ef6cSHugh Dickins 		 * but be defensive.  Point "old" at the new folio for the clear and free below.
16550142ef6cSHugh Dickins 		 */
1656907ea17eSMatthew Wilcox (Oracle) 		old = new;
16570142ef6cSHugh Dickins 	} else {
1658907ea17eSMatthew Wilcox (Oracle) 		folio_add_lru(new);
16590d698e25SMatthew Wilcox (Oracle) 		*foliop = new;
16600142ef6cSHugh Dickins 	}
1661bde05d1cSHugh Dickins 
1662907ea17eSMatthew Wilcox (Oracle) 	folio_clear_swapcache(old);
1663907ea17eSMatthew Wilcox (Oracle) 	old->private = NULL;
1664bde05d1cSHugh Dickins 
1665907ea17eSMatthew Wilcox (Oracle) 	folio_unlock(old);
1666907ea17eSMatthew Wilcox (Oracle) 	folio_put_refs(old, 2);
16670142ef6cSHugh Dickins 	return error;
1668bde05d1cSHugh Dickins }
1669bde05d1cSHugh Dickins 
16706cec2b95SMiaohe Lin static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
16716cec2b95SMiaohe Lin 					 struct folio *folio, swp_entry_t swap)
16726cec2b95SMiaohe Lin {
16736cec2b95SMiaohe Lin 	struct address_space *mapping = inode->i_mapping;
16746cec2b95SMiaohe Lin 	struct shmem_inode_info *info = SHMEM_I(inode);
16756cec2b95SMiaohe Lin 	swp_entry_t swapin_error;
16766cec2b95SMiaohe Lin 	void *old;
16776cec2b95SMiaohe Lin 
16786cec2b95SMiaohe Lin 	swapin_error = make_swapin_error_entry(&folio->page);
16796cec2b95SMiaohe Lin 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
16806cec2b95SMiaohe Lin 			     swp_to_radix_entry(swap),
16816cec2b95SMiaohe Lin 			     swp_to_radix_entry(swapin_error), 0);
16826cec2b95SMiaohe Lin 	if (old != swp_to_radix_entry(swap))
16836cec2b95SMiaohe Lin 		return;
16846cec2b95SMiaohe Lin 
16856cec2b95SMiaohe Lin 	folio_wait_writeback(folio);
168675fa68a5SMatthew Wilcox (Oracle) 	delete_from_swap_cache(folio);
16876cec2b95SMiaohe Lin 	spin_lock_irq(&info->lock);
16886cec2b95SMiaohe Lin 	/*
16896cec2b95SMiaohe Lin 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
16906cec2b95SMiaohe Lin 	 * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in
16916cec2b95SMiaohe Lin 	 * shmem_evict_inode.
16926cec2b95SMiaohe Lin 	 */
16936cec2b95SMiaohe Lin 	info->alloced--;
16946cec2b95SMiaohe Lin 	info->swapped--;
16956cec2b95SMiaohe Lin 	shmem_recalc_inode(inode);
16966cec2b95SMiaohe Lin 	spin_unlock_irq(&info->lock);
16976cec2b95SMiaohe Lin 	swap_free(swap);
16986cec2b95SMiaohe Lin }
16996cec2b95SMiaohe Lin 
1700bde05d1cSHugh Dickins /*
1701833de10fSMiaohe Lin  * Swap in the folio pointed to by *foliop.
1702833de10fSMiaohe Lin  * Caller has to make sure that *foliop contains a valid swapped folio.
1703833de10fSMiaohe Lin  * Returns 0 with the folio in *foliop on success; on failure, returns
1704833de10fSMiaohe Lin  * the error code and sets *foliop to NULL.
17051da177e4SLinus Torvalds  */
1706da08e9b7SMatthew Wilcox (Oracle) static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1707da08e9b7SMatthew Wilcox (Oracle) 			     struct folio **foliop, enum sgp_type sgp,
1708c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
17092b740303SSouptick Joarder 			     vm_fault_t *fault_type)
17101da177e4SLinus Torvalds {
17111da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
171223f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
171304f94e3fSDan Schatzberg 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1714da08e9b7SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
17151da177e4SLinus Torvalds 	swp_entry_t swap;
17161da177e4SLinus Torvalds 	int error;
17171da177e4SLinus Torvalds 
1718da08e9b7SMatthew Wilcox (Oracle) 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1719da08e9b7SMatthew Wilcox (Oracle) 	swap = radix_to_swp_entry(*foliop);
1720da08e9b7SMatthew Wilcox (Oracle) 	*foliop = NULL;
172154af6042SHugh Dickins 
17226cec2b95SMiaohe Lin 	if (is_swapin_error_entry(swap))
17236cec2b95SMiaohe Lin 		return -EIO;
17246cec2b95SMiaohe Lin 
17251da177e4SLinus Torvalds 	/* Look it up and read it in.. */
17265739a81cSMatthew Wilcox (Oracle) 	folio = swap_cache_get_folio(swap, NULL, 0);
17275739a81cSMatthew Wilcox (Oracle) 	if (!folio) {
17289e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17299e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
173068da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17319e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17322262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17339e18eb29SAndres Lagar-Cavilla 		}
17349e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
17355739a81cSMatthew Wilcox (Oracle) 		folio = shmem_swapin(swap, gfp, info, index);
17365739a81cSMatthew Wilcox (Oracle) 		if (!folio) {
17371da177e4SLinus Torvalds 			error = -ENOMEM;
173854af6042SHugh Dickins 			goto failed;
1739285b2c4fSHugh Dickins 		}
17401da177e4SLinus Torvalds 	}
17411da177e4SLinus Torvalds 
1742833de10fSMiaohe Lin 	/* We have to do this with folio locked to prevent races */
1743da08e9b7SMatthew Wilcox (Oracle) 	folio_lock(folio);
1744da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_swapcache(folio) ||
1745da08e9b7SMatthew Wilcox (Oracle) 	    folio_swap_entry(folio).val != swap.val ||
1746d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1747c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1748d1899228SHugh Dickins 		goto unlock;
1749bde05d1cSHugh Dickins 	}
1750da08e9b7SMatthew Wilcox (Oracle) 	if (!folio_test_uptodate(folio)) {
17511da177e4SLinus Torvalds 		error = -EIO;
175254af6042SHugh Dickins 		goto failed;
175354af6042SHugh Dickins 	}
1754da08e9b7SMatthew Wilcox (Oracle) 	folio_wait_writeback(folio);
175554af6042SHugh Dickins 
17568a84802eSSteven Price 	/*
17578a84802eSSteven Price 	 * Some architectures may have to restore extra metadata to the
1758da08e9b7SMatthew Wilcox (Oracle) 	 * folio after reading from swap.
17598a84802eSSteven Price 	 */
1760da08e9b7SMatthew Wilcox (Oracle) 	arch_swap_restore(swap, folio);
17618a84802eSSteven Price 
1762069d849cSMatthew Wilcox (Oracle) 	if (shmem_should_replace_folio(folio, gfp)) {
17630d698e25SMatthew Wilcox (Oracle) 		error = shmem_replace_folio(&folio, gfp, info, index);
1764bde05d1cSHugh Dickins 		if (error)
176554af6042SHugh Dickins 			goto failed;
17661da177e4SLinus Torvalds 	}
17671da177e4SLinus Torvalds 
1768b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, index,
17693fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17703fea5a49SJohannes Weiner 					charge_mm);
177154af6042SHugh Dickins 	if (error)
177254af6042SHugh Dickins 		goto failed;
177354af6042SHugh Dickins 
17744595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
177554af6042SHugh Dickins 	info->swapped--;
177654af6042SHugh Dickins 	shmem_recalc_inode(inode);
17774595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
177827ab7006SHugh Dickins 
177966d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1780da08e9b7SMatthew Wilcox (Oracle) 		folio_mark_accessed(folio);
178166d2f4d2SHugh Dickins 
178275fa68a5SMatthew Wilcox (Oracle) 	delete_from_swap_cache(folio);
1783da08e9b7SMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
178427ab7006SHugh Dickins 	swap_free(swap);
178527ab7006SHugh Dickins 
1786da08e9b7SMatthew Wilcox (Oracle) 	*foliop = folio;
1787c5bf121eSVineeth Remanan Pillai 	return 0;
1788c5bf121eSVineeth Remanan Pillai failed:
1789c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1790c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
17916cec2b95SMiaohe Lin 	if (error == -EIO)
17926cec2b95SMiaohe Lin 		shmem_set_folio_swapin_error(inode, index, folio, swap);
1793c5bf121eSVineeth Remanan Pillai unlock:
1794da08e9b7SMatthew Wilcox (Oracle) 	if (folio) {
1795da08e9b7SMatthew Wilcox (Oracle) 		folio_unlock(folio);
1796da08e9b7SMatthew Wilcox (Oracle) 		folio_put(folio);
1797c5bf121eSVineeth Remanan Pillai 	}
1798c5bf121eSVineeth Remanan Pillai 
1799c5bf121eSVineeth Remanan Pillai 	return error;
1800c5bf121eSVineeth Remanan Pillai }
1801c5bf121eSVineeth Remanan Pillai 
1802c5bf121eSVineeth Remanan Pillai /*
1803fc26babbSMatthew Wilcox (Oracle)  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1804c5bf121eSVineeth Remanan Pillai  *
1805c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1806c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty since we also free the swap
1807c5bf121eSVineeth Remanan Pillai  * entry since a page cannot live in both the swap and page cache.
1808c5bf121eSVineeth Remanan Pillai  *
1809c949b097SAxel Rasmussen  * vma, vmf, and fault_type are only supplied by shmem_fault:
1810c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1811c5bf121eSVineeth Remanan Pillai  */
1812fc26babbSMatthew Wilcox (Oracle) static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
1813fc26babbSMatthew Wilcox (Oracle) 		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
1814c5bf121eSVineeth Remanan Pillai 		struct vm_area_struct *vma, struct vm_fault *vmf,
1815c5bf121eSVineeth Remanan Pillai 		vm_fault_t *fault_type)
1816c5bf121eSVineeth Remanan Pillai {
1817c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1818c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1819c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1820c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1821b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
1822c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1823164cc4feSRik van Riel 	gfp_t huge_gfp;
1824c5bf121eSVineeth Remanan Pillai 	int error;
1825c5bf121eSVineeth Remanan Pillai 	int once = 0;
1826c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1827c5bf121eSVineeth Remanan Pillai 
1828c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1829c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1830c5bf121eSVineeth Remanan Pillai repeat:
1831c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1832c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1833c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1834c5bf121eSVineeth Remanan Pillai 	}
1835c5bf121eSVineeth Remanan Pillai 
1836c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
183704f94e3fSDan Schatzberg 	charge_mm = vma ? vma->vm_mm : NULL;
1838c5bf121eSVineeth Remanan Pillai 
1839b1d0ec3aSMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
1840b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio && vma && userfaultfd_minor(vma)) {
1841b1d0ec3aSMatthew Wilcox (Oracle) 		if (!xa_is_value(folio)) {
1842b1d0ec3aSMatthew Wilcox (Oracle) 			folio_unlock(folio);
1843b1d0ec3aSMatthew Wilcox (Oracle) 			folio_put(folio);
1844c949b097SAxel Rasmussen 		}
1845c949b097SAxel Rasmussen 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1846c949b097SAxel Rasmussen 		return 0;
1847c949b097SAxel Rasmussen 	}
1848c949b097SAxel Rasmussen 
1849b1d0ec3aSMatthew Wilcox (Oracle) 	if (xa_is_value(folio)) {
1850da08e9b7SMatthew Wilcox (Oracle) 		error = shmem_swapin_folio(inode, index, &folio,
1851c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1852c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1853c5bf121eSVineeth Remanan Pillai 			goto repeat;
1854c5bf121eSVineeth Remanan Pillai 
1855fc26babbSMatthew Wilcox (Oracle) 		*foliop = folio;
1856c5bf121eSVineeth Remanan Pillai 		return error;
1857c5bf121eSVineeth Remanan Pillai 	}
1858c5bf121eSVineeth Remanan Pillai 
1859b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
1860b1d0ec3aSMatthew Wilcox (Oracle) 		hindex = folio->index;
1861acdd9f8eSHugh Dickins 		if (sgp == SGP_WRITE)
1862b1d0ec3aSMatthew Wilcox (Oracle) 			folio_mark_accessed(folio);
1863b1d0ec3aSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio))
1864acdd9f8eSHugh Dickins 			goto out;
1865fc26babbSMatthew Wilcox (Oracle) 		/* fallocated folio */
1866c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1867c5bf121eSVineeth Remanan Pillai 			goto clear;
1868b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
1869b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
1870c5bf121eSVineeth Remanan Pillai 	}
1871c5bf121eSVineeth Remanan Pillai 
1872c5bf121eSVineeth Remanan Pillai 	/*
1873fc26babbSMatthew Wilcox (Oracle) 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
1874fc26babbSMatthew Wilcox (Oracle) 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
1875acdd9f8eSHugh Dickins 	 */
1876fc26babbSMatthew Wilcox (Oracle) 	*foliop = NULL;
1877acdd9f8eSHugh Dickins 	if (sgp == SGP_READ)
1878acdd9f8eSHugh Dickins 		return 0;
1879acdd9f8eSHugh Dickins 	if (sgp == SGP_NOALLOC)
1880acdd9f8eSHugh Dickins 		return -ENOENT;
1881acdd9f8eSHugh Dickins 
1882acdd9f8eSHugh Dickins 	/*
1883acdd9f8eSHugh Dickins 	 * Fast cache lookup and swap lookup did not find it: allocate.
1884c5bf121eSVineeth Remanan Pillai 	 */
1885c5bf121eSVineeth Remanan Pillai 
1886cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1887cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1888cfda0526SMike Rapoport 		return 0;
1889cfda0526SMike Rapoport 	}
1890cfda0526SMike Rapoport 
18915e6e5a12SHugh Dickins 	if (!shmem_is_huge(vma, inode, index))
1892800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
189327d80fa2SKees Cook 
1894164cc4feSRik van Riel 	huge_gfp = vma_thp_gfp_mask(vma);
189578cc8cdcSRik van Riel 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1896b1d0ec3aSMatthew Wilcox (Oracle) 	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1897b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1898c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1899b1d0ec3aSMatthew Wilcox (Oracle) 		folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
190054af6042SHugh Dickins 	}
1901b1d0ec3aSMatthew Wilcox (Oracle) 	if (IS_ERR(folio)) {
1902779750d2SKirill A. Shutemov 		int retry = 5;
1903c5bf121eSVineeth Remanan Pillai 
1904b1d0ec3aSMatthew Wilcox (Oracle) 		error = PTR_ERR(folio);
1905b1d0ec3aSMatthew Wilcox (Oracle) 		folio = NULL;
1906779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1907c5bf121eSVineeth Remanan Pillai 			goto unlock;
1908779750d2SKirill A. Shutemov 		/*
1909fc26babbSMatthew Wilcox (Oracle) 		 * Try to reclaim some space by splitting a large folio
1910779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1911779750d2SKirill A. Shutemov 		 */
1912779750d2SKirill A. Shutemov 		while (retry--) {
1913779750d2SKirill A. Shutemov 			int ret;
1914c5bf121eSVineeth Remanan Pillai 
1915779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1916779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1917779750d2SKirill A. Shutemov 				break;
1918779750d2SKirill A. Shutemov 			if (ret)
1919779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1920779750d2SKirill A. Shutemov 		}
1921c5bf121eSVineeth Remanan Pillai 		goto unlock;
1922800d8c63SKirill A. Shutemov 	}
1923800d8c63SKirill A. Shutemov 
1924b1d0ec3aSMatthew Wilcox (Oracle) 	hindex = round_down(index, folio_nr_pages(folio));
1925800d8c63SKirill A. Shutemov 
192666d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1927b1d0ec3aSMatthew Wilcox (Oracle) 		__folio_set_referenced(folio);
192866d2f4d2SHugh Dickins 
1929b7dd44a1SMatthew Wilcox (Oracle) 	error = shmem_add_to_page_cache(folio, mapping, hindex,
19303fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19313fea5a49SJohannes Weiner 					charge_mm);
19323fea5a49SJohannes Weiner 	if (error)
1933800d8c63SKirill A. Shutemov 		goto unacct;
1934b1d0ec3aSMatthew Wilcox (Oracle) 	folio_add_lru(folio);
193554af6042SHugh Dickins 
19364595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1937b1d0ec3aSMatthew Wilcox (Oracle) 	info->alloced += folio_nr_pages(folio);
1938fa020a2bSAndrew Morton 	inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
193954af6042SHugh Dickins 	shmem_recalc_inode(inode);
19404595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19411635f6a7SHugh Dickins 	alloced = true;
194254af6042SHugh Dickins 
1943b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_pmd_mappable(folio) &&
1944779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1945fc26babbSMatthew Wilcox (Oracle) 					folio_next_index(folio) - 1) {
1946779750d2SKirill A. Shutemov 		/*
1947fc26babbSMatthew Wilcox (Oracle) 		 * Part of the large folio is beyond i_size: subject
1948779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1949779750d2SKirill A. Shutemov 		 */
1950779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1951d041353dSCong Wang 		/*
1952d041353dSCong Wang 		 * list_empty_careful() defends against unlocked access to
1953d041353dSCong Wang 		 * ->shrink_list in shmem_unused_huge_shrink().
1954d041353dSCong Wang 		 */
1955d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1956779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1957779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1958779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1959779750d2SKirill A. Shutemov 		}
1960779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1961779750d2SKirill A. Shutemov 	}
1962779750d2SKirill A. Shutemov 
1963ec9516fbSHugh Dickins 	/*
1964fc26babbSMatthew Wilcox (Oracle) 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
19651635f6a7SHugh Dickins 	 */
19661635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19671635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19681635f6a7SHugh Dickins clear:
19691635f6a7SHugh Dickins 	/*
1970fc26babbSMatthew Wilcox (Oracle) 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
1971fc26babbSMatthew Wilcox (Oracle) 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
19721635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1973ec9516fbSHugh Dickins 	 */
1974b1d0ec3aSMatthew Wilcox (Oracle) 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
1975b1d0ec3aSMatthew Wilcox (Oracle) 		long i, n = folio_nr_pages(folio);
1976800d8c63SKirill A. Shutemov 
1977b1d0ec3aSMatthew Wilcox (Oracle) 		for (i = 0; i < n; i++)
1978b1d0ec3aSMatthew Wilcox (Oracle) 			clear_highpage(folio_page(folio, i));
1979b1d0ec3aSMatthew Wilcox (Oracle) 		flush_dcache_folio(folio);
1980b1d0ec3aSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
1981ec9516fbSHugh Dickins 	}
1982bde05d1cSHugh Dickins 
198354af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
198475edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
198509cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1986267a4c76SHugh Dickins 		if (alloced) {
1987b1d0ec3aSMatthew Wilcox (Oracle) 			folio_clear_dirty(folio);
1988b1d0ec3aSMatthew Wilcox (Oracle) 			filemap_remove_folio(folio);
19894595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1990267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
19914595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1992267a4c76SHugh Dickins 		}
199354af6042SHugh Dickins 		error = -EINVAL;
1994267a4c76SHugh Dickins 		goto unlock;
1995ff36b801SShaohua Li 	}
199663ec1973SMatthew Wilcox (Oracle) out:
1997fc26babbSMatthew Wilcox (Oracle) 	*foliop = folio;
199854af6042SHugh Dickins 	return 0;
1999d00806b1SNick Piggin 
2000d0217ac0SNick Piggin 	/*
200154af6042SHugh Dickins 	 * Error recovery.
20021da177e4SLinus Torvalds 	 */
200354af6042SHugh Dickins unacct:
2004b1d0ec3aSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2005800d8c63SKirill A. Shutemov 
2006b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio_test_large(folio)) {
2007b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2008b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
2009800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2010800d8c63SKirill A. Shutemov 	}
2011d1899228SHugh Dickins unlock:
2012b1d0ec3aSMatthew Wilcox (Oracle) 	if (folio) {
2013b1d0ec3aSMatthew Wilcox (Oracle) 		folio_unlock(folio);
2014b1d0ec3aSMatthew Wilcox (Oracle) 		folio_put(folio);
201554af6042SHugh Dickins 	}
201654af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20174595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
201854af6042SHugh Dickins 		shmem_recalc_inode(inode);
20194595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20201da177e4SLinus Torvalds 		goto repeat;
2021d8dc74f2SAdrian Bunk 	}
20227f4446eeSMatthew Wilcox 	if (error == -EEXIST)
202354af6042SHugh Dickins 		goto repeat;
202454af6042SHugh Dickins 	return error;
20251da177e4SLinus Torvalds }
20261da177e4SLinus Torvalds 
20274e1fc793SMatthew Wilcox (Oracle) int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
20284e1fc793SMatthew Wilcox (Oracle) 		enum sgp_type sgp)
20294e1fc793SMatthew Wilcox (Oracle) {
20304e1fc793SMatthew Wilcox (Oracle) 	return shmem_get_folio_gfp(inode, index, foliop, sgp,
20314e1fc793SMatthew Wilcox (Oracle) 			mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
20324e1fc793SMatthew Wilcox (Oracle) }
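
/*
 * Minimal usage sketch (illustrative): other subsystems pull a tmpfs
 * page into the page cache along these lines:
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *
 *	if (!err) {
 *		// returned locked, with a reference held
 *		folio_mark_dirty(folio);
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 */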
20334e1fc793SMatthew Wilcox (Oracle) 
203410d20bd2SLinus Torvalds /*
203510d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
203610d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
203710d20bd2SLinus Torvalds  * target.
203810d20bd2SLinus Torvalds  */
2039ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
204010d20bd2SLinus Torvalds {
204110d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20422055da97SIngo Molnar 	list_del_init(&wait->entry);
204310d20bd2SLinus Torvalds 	return ret;
204410d20bd2SLinus Torvalds }
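
/*
 * Why removal must be unconditional (explanatory note): the wait queue
 * head lives on shmem_fallocate()'s stack, so wake_up_all() has to
 * leave that list empty; a woken faulter's finish_wait() then sees an
 * unlinked entry and never dereferences the defunct queue head.
 */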
204510d20bd2SLinus Torvalds 
204620acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20471da177e4SLinus Torvalds {
204811bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2049496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20509e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
205168a54100SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
205220acce67SSouptick Joarder 	int err;
205320acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20541da177e4SLinus Torvalds 
2055f00cdc6dSHugh Dickins 	/*
2056f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2057f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
20589608703eSJan Kara 	 * locks writers out with its hold on i_rwsem.  So refrain from
20598e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20608e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20618e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20628e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20638e205f77SHugh Dickins 	 *
20648e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20658e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20668e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20678e205f77SHugh Dickins 	 *
20688e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20699608703eSJan Kara 	 * standard mutex or completion: but we cannot take i_rwsem in fault,
20708e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2071f00cdc6dSHugh Dickins 	 */
2072f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2073f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2074f00cdc6dSHugh Dickins 
2075f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2076f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20778e205f77SHugh Dickins 		if (shmem_falloc &&
20788e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20798e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20808e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20818897c1b1SKirill A. Shutemov 			struct file *fpin;
20828e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
208310d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20848e205f77SHugh Dickins 
20858e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20868897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20878897c1b1SKirill A. Shutemov 			if (fpin)
20888e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20898e205f77SHugh Dickins 
20908e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20918e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20928e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20938e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20948e205f77SHugh Dickins 			schedule();
20958e205f77SHugh Dickins 
20968e205f77SHugh Dickins 			/*
20978e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20988e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
20998e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
21008e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
21018e205f77SHugh Dickins 			 * though i_lock is needed lest we race with wake_up_all().
21028e205f77SHugh Dickins 			 */
21038e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
21048e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
21058e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21068897c1b1SKirill A. Shutemov 
21078897c1b1SKirill A. Shutemov 			if (fpin)
21088897c1b1SKirill A. Shutemov 				fput(fpin);
21098e205f77SHugh Dickins 			return ret;
2110f00cdc6dSHugh Dickins 		}
21118e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2112f00cdc6dSHugh Dickins 	}
2113f00cdc6dSHugh Dickins 
211468a54100SMatthew Wilcox (Oracle) 	err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
2115cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
211620acce67SSouptick Joarder 	if (err)
211720acce67SSouptick Joarder 		return vmf_error(err);
211868a54100SMatthew Wilcox (Oracle) 	if (folio)
211968a54100SMatthew Wilcox (Oracle) 		vmf->page = folio_file_page(folio, vmf->pgoff);
212068da9f05SHugh Dickins 	return ret;
21211da177e4SLinus Torvalds }
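
/*
 * Rough timeline of the race handled above (an illustrative sketch,
 * not from the original source):
 *
 *	shmem_fallocate(PUNCH_HOLE)	shmem_fault() on same range
 *	---------------------------	---------------------------
 *	i_private = &shmem_falloc
 *	unmap_mapping_range()
 *	shmem_truncate_range()		sees i_private, sleeps on
 *					shmem_falloc->waitq
 *	i_private = NULL
 *	wake_up_all(waitq)		wakes, returns VM_FAULT_RETRY
 *					(or VM_FAULT_NOPAGE)
 */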
21221da177e4SLinus Torvalds 
2123c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2124c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2125c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2126c01d5b30SHugh Dickins {
2127c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2128c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2129c01d5b30SHugh Dickins 	unsigned long addr;
2130c01d5b30SHugh Dickins 	unsigned long offset;
2131c01d5b30SHugh Dickins 	unsigned long inflated_len;
2132c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2133c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2134c01d5b30SHugh Dickins 
2135c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2136c01d5b30SHugh Dickins 		return -ENOMEM;
2137c01d5b30SHugh Dickins 
2138c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2139c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2140c01d5b30SHugh Dickins 
2141396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2142c01d5b30SHugh Dickins 		return addr;
2143c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2144c01d5b30SHugh Dickins 		return addr;
2145c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2146c01d5b30SHugh Dickins 		return addr;
2147c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2148c01d5b30SHugh Dickins 		return addr;
2149c01d5b30SHugh Dickins 
2150c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2151c01d5b30SHugh Dickins 		return addr;
2152c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2153c01d5b30SHugh Dickins 		return addr;
2154c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2155c01d5b30SHugh Dickins 		return addr;
2156c01d5b30SHugh Dickins 	/*
2157c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2158c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
215999158997SKirill A. Shutemov 	 * But if caller specified an address hint and we allocated area there
216099158997SKirill A. Shutemov 	 * successfully, respect that as before.
2161c01d5b30SHugh Dickins 	 */
216299158997SKirill A. Shutemov 	if (uaddr == addr)
2163c01d5b30SHugh Dickins 		return addr;
2164c01d5b30SHugh Dickins 
2165c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2166c01d5b30SHugh Dickins 		struct super_block *sb;
2167c01d5b30SHugh Dickins 
2168c01d5b30SHugh Dickins 		if (file) {
2169c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2170c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2171c01d5b30SHugh Dickins 		} else {
2172c01d5b30SHugh Dickins 			/*
2173c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2174c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2175c01d5b30SHugh Dickins 			 */
2176c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2177c01d5b30SHugh Dickins 				return addr;
2178c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2179c01d5b30SHugh Dickins 		}
21803089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2181c01d5b30SHugh Dickins 			return addr;
2182c01d5b30SHugh Dickins 	}
2183c01d5b30SHugh Dickins 
2184c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2185c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2186c01d5b30SHugh Dickins 		return addr;
2187c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2188c01d5b30SHugh Dickins 		return addr;
2189c01d5b30SHugh Dickins 
2190c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2191c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2192c01d5b30SHugh Dickins 		return addr;
2193c01d5b30SHugh Dickins 	if (inflated_len < len)
2194c01d5b30SHugh Dickins 		return addr;
2195c01d5b30SHugh Dickins 
219699158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2197c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2198c01d5b30SHugh Dickins 		return addr;
2199c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2200c01d5b30SHugh Dickins 		return addr;
2201c01d5b30SHugh Dickins 
2202c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2203c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2204c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2205c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2206c01d5b30SHugh Dickins 
2207c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2208c01d5b30SHugh Dickins 		return addr;
2209c01d5b30SHugh Dickins 	return inflated_addr;
2210c01d5b30SHugh Dickins }
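
/*
 * Worked example for the arithmetic above (a sketch assuming 4K pages
 * and a 2M HPAGE_PMD_SIZE): pgoff 384 gives
 * offset = (384 << 12) & (2M - 1) = 0x180000.  If the first get_area()
 * pick is not already congruent to 0x180000 modulo 2M, the search is
 * redone over len + 2M - 4K bytes and the result shifted within that
 * slack until (addr & (2M - 1)) == offset, so file offsets and virtual
 * addresses agree modulo 2M and PMD-sized mappings become possible.
 */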
2211c01d5b30SHugh Dickins 
22121da177e4SLinus Torvalds #ifdef CONFIG_NUMA
221341ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22141da177e4SLinus Torvalds {
2215496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
221641ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22171da177e4SLinus Torvalds }
22181da177e4SLinus Torvalds 
2219d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2220d8dc74f2SAdrian Bunk 					  unsigned long addr)
22211da177e4SLinus Torvalds {
2222496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
222341ffe5d5SHugh Dickins 	pgoff_t index;
22241da177e4SLinus Torvalds 
222541ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
222641ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22271da177e4SLinus Torvalds }
22281da177e4SLinus Torvalds #endif
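
/*
 * Index arithmetic in shmem_get_policy(), worked through (illustrative
 * numbers, assuming 4K pages): for vm_start = 0x7f0000200000 and
 * vm_pgoff = 16, address 0x7f0000203000 yields (0x3000 >> 12) + 16 = 19,
 * the file page index used as the key into the shared-policy rbtree.
 */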
22291da177e4SLinus Torvalds 
2230d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
22311da177e4SLinus Torvalds {
2232496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22331da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22341da177e4SLinus Torvalds 	int retval = -ENOMEM;
22351da177e4SLinus Torvalds 
2236ea0dfeb4SHugh Dickins 	/*
2237ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2238ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2239ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2240ea0dfeb4SHugh Dickins 	 */
22411da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
2242d7c9e99aSAlexey Gladkov 		if (!user_shm_lock(inode->i_size, ucounts))
22431da177e4SLinus Torvalds 			goto out_nomem;
22441da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
224589e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22461da177e4SLinus Torvalds 	}
2247d7c9e99aSAlexey Gladkov 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2248d7c9e99aSAlexey Gladkov 		user_shm_unlock(inode->i_size, ucounts);
22491da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
225089e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22511da177e4SLinus Torvalds 	}
22521da177e4SLinus Torvalds 	retval = 0;
225389e004eaSLee Schermerhorn 
22541da177e4SLinus Torvalds out_nomem:
22551da177e4SLinus Torvalds 	return retval;
22561da177e4SLinus Torvalds }
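
/*
 * Hedged usage sketch (the real caller is the SysV ipc code; the names
 * below follow ipc/shm.c as best recalled, so treat them as
 * approximate): shmctl(SHM_LOCK) ends up doing roughly
 *
 *	err = shmem_lock(shp->shm_file, 1, shp->mlock_ucounts);
 *
 * under ipc_lock_object(), and passing lock == 0 with the same ucounts
 * undoes the accounting on SHM_UNLOCK or segment destruction.
 */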
22571da177e4SLinus Torvalds 
22589b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22591da177e4SLinus Torvalds {
2260ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
226122247efdSPeter Xu 	int ret;
2262ab3948f5SJoel Fernandes (Google) 
226322247efdSPeter Xu 	ret = seal_check_future_write(info->seals, vma);
226422247efdSPeter Xu 	if (ret)
226522247efdSPeter Xu 		return ret;
2266ab3948f5SJoel Fernandes (Google) 
226751b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
226851b0bff2SCatalin Marinas 	vma->vm_flags |= VM_MTE_ALLOWED;
226951b0bff2SCatalin Marinas 
22701da177e4SLinus Torvalds 	file_accessed(file);
22711da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
22721da177e4SLinus Torvalds 	return 0;
22731da177e4SLinus Torvalds }
22741da177e4SLinus Torvalds 
2275cb241339SHugh Dickins #ifdef CONFIG_TMPFS_XATTR
2276cb241339SHugh Dickins static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2277cb241339SHugh Dickins 
2278cb241339SHugh Dickins /*
2279cb241339SHugh Dickins  * chattr's fsflags are unrelated to extended attributes,
2280cb241339SHugh Dickins  * but tmpfs has chosen to enable them under the same config option.
2281cb241339SHugh Dickins  */
2282cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2283e408e695STheodore Ts'o {
2284cb241339SHugh Dickins 	unsigned int i_flags = 0;
2285cb241339SHugh Dickins 
2286cb241339SHugh Dickins 	if (fsflags & FS_NOATIME_FL)
2287cb241339SHugh Dickins 		i_flags |= S_NOATIME;
2288cb241339SHugh Dickins 	if (fsflags & FS_APPEND_FL)
2289cb241339SHugh Dickins 		i_flags |= S_APPEND;
2290cb241339SHugh Dickins 	if (fsflags & FS_IMMUTABLE_FL)
2291cb241339SHugh Dickins 		i_flags |= S_IMMUTABLE;
2292cb241339SHugh Dickins 	/*
2293cb241339SHugh Dickins 	 * But FS_NODUMP_FL does not require any action in i_flags.
2294cb241339SHugh Dickins 	 */
2295cb241339SHugh Dickins 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2296e408e695STheodore Ts'o }
2297cb241339SHugh Dickins #else
2298cb241339SHugh Dickins static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2299cb241339SHugh Dickins {
2300cb241339SHugh Dickins }
2301cb241339SHugh Dickins #define shmem_initxattrs NULL
2302cb241339SHugh Dickins #endif
2303e408e695STheodore Ts'o 
2304e408e695STheodore Ts'o static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
230509208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
23061da177e4SLinus Torvalds {
23071da177e4SLinus Torvalds 	struct inode *inode;
23081da177e4SLinus Torvalds 	struct shmem_inode_info *info;
23091da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2310e809d5f0SChris Down 	ino_t ino;
23111da177e4SLinus Torvalds 
2312e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
23131da177e4SLinus Torvalds 		return NULL;
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds 	inode = new_inode(sb);
23161da177e4SLinus Torvalds 	if (inode) {
2317e809d5f0SChris Down 		inode->i_ino = ino;
231821cb47beSChristian Brauner 		inode_init_owner(&init_user_ns, inode, dir, mode);
23191da177e4SLinus Torvalds 		inode->i_blocks = 0;
2320078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
232146c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
23221da177e4SLinus Torvalds 		info = SHMEM_I(inode);
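		/*
		 * vfs_inode is the final member of shmem_inode_info, so
		 * the memset below zeroes only the shmem-private fields
		 * and stops short of the inode new_inode() initialized.
		 */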
23231da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
23241da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2325af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
232640e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23270b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2328f7cd16a5SXavier Roche 		info->i_crtime = inode->i_mtime;
2329e408e695STheodore Ts'o 		info->fsflags = (dir == NULL) ? 0 :
2330e408e695STheodore Ts'o 			SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2331cb241339SHugh Dickins 		if (info->fsflags)
2332cb241339SHugh Dickins 			shmem_set_inode_flags(inode, info->fsflags);
2333779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23341da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
233538f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
233672c04902SAl Viro 		cache_no_acl(inode);
2337ff36da69SMatthew Wilcox (Oracle) 		mapping_set_large_folios(inode->i_mapping);
23381da177e4SLinus Torvalds 
23391da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23401da177e4SLinus Torvalds 		default:
234139f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23421da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23431da177e4SLinus Torvalds 			break;
23441da177e4SLinus Torvalds 		case S_IFREG:
234514fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23461da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23471da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
234871fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
234971fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23501da177e4SLinus Torvalds 			break;
23511da177e4SLinus Torvalds 		case S_IFDIR:
2352d8c76e6fSDave Hansen 			inc_nlink(inode);
23531da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23541da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23551da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23561da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23571da177e4SLinus Torvalds 			break;
23581da177e4SLinus Torvalds 		case S_IFLNK:
23591da177e4SLinus Torvalds 			/*
23601da177e4SLinus Torvalds 			 * Must not load anything into the rbtree:
23611da177e4SLinus Torvalds 			 * mpol_free_shared_policy() will not be called.
23621da177e4SLinus Torvalds 			 */
236371fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23641da177e4SLinus Torvalds 			break;
23651da177e4SLinus Torvalds 		}
2366b45d71fbSJoel Fernandes (Google) 
2367b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23685b04c689SPavel Emelyanov 	} else
23695b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23701da177e4SLinus Torvalds 	return inode;
23711da177e4SLinus Torvalds }
23721da177e4SLinus Torvalds 
23733460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
23743460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23754c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
23764c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
23774c27fe4cSMike Rapoport 			   unsigned long dst_addr,
23784c27fe4cSMike Rapoport 			   unsigned long src_addr,
23798ee79edfSPeter Xu 			   bool zeropage, bool wp_copy,
23804c27fe4cSMike Rapoport 			   struct page **pagep)
23814c27fe4cSMike Rapoport {
23824c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23834c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23844c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23854c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23864c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23874c27fe4cSMike Rapoport 	void *page_kaddr;
2388b7dd44a1SMatthew Wilcox (Oracle) 	struct folio *folio;
23894c27fe4cSMike Rapoport 	int ret;
23903460f6e5SAxel Rasmussen 	pgoff_t max_off;
23914c27fe4cSMike Rapoport 
23927ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
23937ed9d238SAxel Rasmussen 		/*
23947ed9d238SAxel Rasmussen 		 * We may have got a page, returned -ENOENT triggering a retry,
23957ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the page, to
23967ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
23977ed9d238SAxel Rasmussen 		 */
23987ed9d238SAxel Rasmussen 		if (unlikely(*pagep)) {
23997ed9d238SAxel Rasmussen 			put_page(*pagep);
24007ed9d238SAxel Rasmussen 			*pagep = NULL;
24017ed9d238SAxel Rasmussen 		}
24027d64ae3aSAxel Rasmussen 		return -ENOMEM;
24037ed9d238SAxel Rasmussen 	}
24044c27fe4cSMike Rapoport 
2405cb658a45SAndrea Arcangeli 	if (!*pagep) {
24067d64ae3aSAxel Rasmussen 		ret = -ENOMEM;
24077a7256d5SMatthew Wilcox (Oracle) 		folio = shmem_alloc_folio(gfp, info, pgoff);
24087a7256d5SMatthew Wilcox (Oracle) 		if (!folio)
24090f079694SMike Rapoport 			goto out_unacct_blocks;
24104c27fe4cSMike Rapoport 
24113460f6e5SAxel Rasmussen 		if (!zeropage) {	/* COPY */
24127a7256d5SMatthew Wilcox (Oracle) 			page_kaddr = kmap_local_folio(folio, 0);
24138d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
24148d103963SMike Rapoport 					     (const void __user *)src_addr,
24154c27fe4cSMike Rapoport 					     PAGE_SIZE);
24167a7256d5SMatthew Wilcox (Oracle) 			kunmap_local(page_kaddr);
24174c27fe4cSMike Rapoport 
2418c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
24194c27fe4cSMike Rapoport 			if (unlikely(ret)) {
24207a7256d5SMatthew Wilcox (Oracle) 				*pagep = &folio->page;
24217d64ae3aSAxel Rasmussen 				ret = -ENOENT;
24224c27fe4cSMike Rapoport 				/* don't free the page */
24237d64ae3aSAxel Rasmussen 				goto out_unacct_blocks;
24244c27fe4cSMike Rapoport 			}
242519b482c2SMuchun Song 
24267a7256d5SMatthew Wilcox (Oracle) 			flush_dcache_folio(folio);
24273460f6e5SAxel Rasmussen 		} else {		/* ZEROPAGE */
24287a7256d5SMatthew Wilcox (Oracle) 			clear_user_highpage(&folio->page, dst_addr);
24298d103963SMike Rapoport 		}
24304c27fe4cSMike Rapoport 	} else {
24317a7256d5SMatthew Wilcox (Oracle) 		folio = page_folio(*pagep);
24327a7256d5SMatthew Wilcox (Oracle) 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
24334c27fe4cSMike Rapoport 		*pagep = NULL;
24344c27fe4cSMike Rapoport 	}
24354c27fe4cSMike Rapoport 
24367a7256d5SMatthew Wilcox (Oracle) 	VM_BUG_ON(folio_test_locked(folio));
24377a7256d5SMatthew Wilcox (Oracle) 	VM_BUG_ON(folio_test_swapbacked(folio));
24387a7256d5SMatthew Wilcox (Oracle) 	__folio_set_locked(folio);
24397a7256d5SMatthew Wilcox (Oracle) 	__folio_set_swapbacked(folio);
24407a7256d5SMatthew Wilcox (Oracle) 	__folio_mark_uptodate(folio);
24419cc90c66SAndrea Arcangeli 
2442e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2443e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
24443460f6e5SAxel Rasmussen 	if (unlikely(pgoff >= max_off))
2445e2a50c1fSAndrea Arcangeli 		goto out_release;
2446e2a50c1fSAndrea Arcangeli 
2447b7dd44a1SMatthew Wilcox (Oracle) 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
24483fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24494c27fe4cSMike Rapoport 	if (ret)
24504c27fe4cSMike Rapoport 		goto out_release;
24514c27fe4cSMike Rapoport 
24527d64ae3aSAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
24537a7256d5SMatthew Wilcox (Oracle) 				       &folio->page, true, wp_copy);
24547d64ae3aSAxel Rasmussen 	if (ret)
24557d64ae3aSAxel Rasmussen 		goto out_delete_from_cache;
24564c27fe4cSMike Rapoport 
245794b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24584c27fe4cSMike Rapoport 	info->alloced++;
24594c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24604c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
246194b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24624c27fe4cSMike Rapoport 
24637a7256d5SMatthew Wilcox (Oracle) 	folio_unlock(folio);
24647d64ae3aSAxel Rasmussen 	return 0;
24657d64ae3aSAxel Rasmussen out_delete_from_cache:
24667a7256d5SMatthew Wilcox (Oracle) 	filemap_remove_folio(folio);
24674c27fe4cSMike Rapoport out_release:
24687a7256d5SMatthew Wilcox (Oracle) 	folio_unlock(folio);
24697a7256d5SMatthew Wilcox (Oracle) 	folio_put(folio);
24704c27fe4cSMike Rapoport out_unacct_blocks:
24710f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24727d64ae3aSAxel Rasmussen 	return ret;
24734c27fe4cSMike Rapoport }
24743460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
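
/*
 * The -ENOENT protocol above, sketched (explanatory note; the caller
 * lives in mm/userfaultfd.c): if copy_from_user() faults while
 * mmap_lock is held, the freshly allocated page is handed back via
 * *pagep with -ENOENT; the caller drops mmap_lock, copies the data in
 * safely, retakes the lock and calls again with *pagep still set,
 * which takes the else branch and installs the already-filled page.
 */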
24758d103963SMike Rapoport 
24761da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
247792e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
247869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24791da177e4SLinus Torvalds 
24801da177e4SLinus Torvalds static int
2481800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
24829d6b0cd7SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
2483800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24841da177e4SLinus Torvalds {
2485800d15a5SNick Piggin 	struct inode *inode = mapping->host;
248640e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
248709cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2488eff1f906SMatthew Wilcox (Oracle) 	struct folio *folio;
2489a7605426SYang Shi 	int ret = 0;
249040e041a2SDavid Herrmann 
24919608703eSJan Kara 	/* i_rwsem is held by caller */
2492ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2493ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2494ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
249540e041a2SDavid Herrmann 			return -EPERM;
249640e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
249740e041a2SDavid Herrmann 			return -EPERM;
249840e041a2SDavid Herrmann 	}
249940e041a2SDavid Herrmann 
2500eff1f906SMatthew Wilcox (Oracle) 	ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
2501a7605426SYang Shi 
2502a7605426SYang Shi 	if (ret)
2503a7605426SYang Shi 		return ret;
2504a7605426SYang Shi 
2505eff1f906SMatthew Wilcox (Oracle) 	*pagep = folio_file_page(folio, index);
2506a7605426SYang Shi 	if (PageHWPoison(*pagep)) {
2507eff1f906SMatthew Wilcox (Oracle) 		folio_unlock(folio);
2508eff1f906SMatthew Wilcox (Oracle) 		folio_put(folio);
2509a7605426SYang Shi 		*pagep = NULL;
2510a7605426SYang Shi 		return -EIO;
2511a7605426SYang Shi 	}
2512a7605426SYang Shi 
2513a7605426SYang Shi 	return 0;
2514800d15a5SNick Piggin }
2515800d15a5SNick Piggin 
2516800d15a5SNick Piggin static int
2517800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2518800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2519800d15a5SNick Piggin 			struct page *page, void *fsdata)
2520800d15a5SNick Piggin {
2521800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2522800d15a5SNick Piggin 
2523800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2524800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2525800d15a5SNick Piggin 
2526ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2527800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2528800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2529800d8c63SKirill A. Shutemov 			int i;
2530800d8c63SKirill A. Shutemov 
2531800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2532800d8c63SKirill A. Shutemov 				if (head + i == page)
2533800d8c63SKirill A. Shutemov 					continue;
2534800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2535800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2536800d8c63SKirill A. Shutemov 			}
2537800d8c63SKirill A. Shutemov 		}
253809cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
253909cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2540ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
254109cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2542ec9516fbSHugh Dickins 		}
2543800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2544ec9516fbSHugh Dickins 	}
2545d3602444SHugh Dickins 	set_page_dirty(page);
25466746aff7SWu Fengguang 	unlock_page(page);
254709cbfeafSKirill A. Shutemov 	put_page(page);
2548d3602444SHugh Dickins 
2549800d15a5SNick Piggin 	return copied;
25501da177e4SLinus Torvalds }
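
/*
 * Zeroing example for the !PageUptodate path above (a sketch assuming
 * a 4K page): copied = 100 bytes landing at pos = 0x1040 gives
 * from = 0x40, so zero_user_segments() clears [0, 0x40) and
 * [0xa4, 0x1000), and only then is the page marked Uptodate, because
 * SGP_WRITE deliberately skipped clearing it at allocation time.
 */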
25511da177e4SLinus Torvalds 
25522ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25531da177e4SLinus Torvalds {
25546e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25556e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25561da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
255741ffe5d5SHugh Dickins 	pgoff_t index;
255841ffe5d5SHugh Dickins 	unsigned long offset;
2559f7c1d074SGeert Uytterhoeven 	int error = 0;
2560cb66a7a1SAl Viro 	ssize_t retval = 0;
25616e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2562a0ee5ec5SHugh Dickins 
256309cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
256409cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25651da177e4SLinus Torvalds 
25661da177e4SLinus Torvalds 	for (;;) {
25674601e2fcSMatthew Wilcox (Oracle) 		struct folio *folio = NULL;
25681da177e4SLinus Torvalds 		struct page *page = NULL;
256941ffe5d5SHugh Dickins 		pgoff_t end_index;
257041ffe5d5SHugh Dickins 		unsigned long nr, ret;
25711da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25721da177e4SLinus Torvalds 
257309cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25741da177e4SLinus Torvalds 		if (index > end_index)
25751da177e4SLinus Torvalds 			break;
25761da177e4SLinus Torvalds 		if (index == end_index) {
257709cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25781da177e4SLinus Torvalds 			if (nr <= offset)
25791da177e4SLinus Torvalds 				break;
25801da177e4SLinus Torvalds 		}
25811da177e4SLinus Torvalds 
25824601e2fcSMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, index, &folio, SGP_READ);
25836e58e79dSAl Viro 		if (error) {
25846e58e79dSAl Viro 			if (error == -EINVAL)
25856e58e79dSAl Viro 				error = 0;
25861da177e4SLinus Torvalds 			break;
25871da177e4SLinus Torvalds 		}
25884601e2fcSMatthew Wilcox (Oracle) 		if (folio) {
25894601e2fcSMatthew Wilcox (Oracle) 			folio_unlock(folio);
2590a7605426SYang Shi 
25914601e2fcSMatthew Wilcox (Oracle) 			page = folio_file_page(folio, index);
2592a7605426SYang Shi 			if (PageHWPoison(page)) {
25934601e2fcSMatthew Wilcox (Oracle) 				folio_put(folio);
2594a7605426SYang Shi 				error = -EIO;
2595a7605426SYang Shi 				break;
2596a7605426SYang Shi 			}
259775edd345SHugh Dickins 		}
25981da177e4SLinus Torvalds 
25991da177e4SLinus Torvalds 		/*
26001da177e4SLinus Torvalds 		 * We must re-evaluate i_size afterwards, since reads (unlike
26019608703eSJan Kara 		 * writes) are called without i_rwsem protection against truncate
26021da177e4SLinus Torvalds 		 */
260309cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
26041da177e4SLinus Torvalds 		i_size = i_size_read(inode);
260509cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
26061da177e4SLinus Torvalds 		if (index == end_index) {
260709cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
26081da177e4SLinus Torvalds 			if (nr <= offset) {
26094601e2fcSMatthew Wilcox (Oracle) 				if (folio)
26104601e2fcSMatthew Wilcox (Oracle) 					folio_put(folio);
26111da177e4SLinus Torvalds 				break;
26121da177e4SLinus Torvalds 			}
26131da177e4SLinus Torvalds 		}
26141da177e4SLinus Torvalds 		nr -= offset;
26151da177e4SLinus Torvalds 
26164601e2fcSMatthew Wilcox (Oracle) 		if (folio) {
26171da177e4SLinus Torvalds 			/*
26181da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
26191da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
26201da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
26211da177e4SLinus Torvalds 			 */
26221da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
26231da177e4SLinus Torvalds 				flush_dcache_page(page);
26241da177e4SLinus Torvalds 			/*
26251da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
26261da177e4SLinus Torvalds 			 */
26271da177e4SLinus Torvalds 			if (!offset)
26284601e2fcSMatthew Wilcox (Oracle) 				folio_mark_accessed(folio);
26291da177e4SLinus Torvalds 			/*
26301da177e4SLinus Torvalds 			 * Ok, we have the page, and it's up-to-date, so
26311da177e4SLinus Torvalds 			 * now we can copy it to user space...
26321da177e4SLinus Torvalds 			 */
26332ba5bbedSAl Viro 			ret = copy_page_to_iter(page, offset, nr, to);
26344601e2fcSMatthew Wilcox (Oracle) 			folio_put(folio);
26351bdec44bSHugh Dickins 
2636fcb14cb1SAl Viro 		} else if (user_backed_iter(to)) {
26381bdec44bSHugh Dickins 			 * Copy-to-user tends to be so well optimized, and
26391bdec44bSHugh Dickins 			 * clear_user() so much less so, that it is noticeably
26401bdec44bSHugh Dickins 			 * faster to copy the zero page instead of clearing.
26401bdec44bSHugh Dickins 			 * faster to copy the zero page instead of clearing.
26411bdec44bSHugh Dickins 			 */
26421bdec44bSHugh Dickins 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
26431bdec44bSHugh Dickins 		} else {
26441bdec44bSHugh Dickins 			/*
26451bdec44bSHugh Dickins 			 * But submitting the same page twice in a row to
26461bdec44bSHugh Dickins 			 * splice(), or perhaps other consumers, can result in confusion:
26471bdec44bSHugh Dickins 			 * so don't attempt that optimization on pipes etc.
26481bdec44bSHugh Dickins 			 */
26491bdec44bSHugh Dickins 			ret = iov_iter_zero(nr, to);
26501bdec44bSHugh Dickins 		}
26511bdec44bSHugh Dickins 
26526e58e79dSAl Viro 		retval += ret;
26531da177e4SLinus Torvalds 		offset += ret;
265409cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
265509cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26561da177e4SLinus Torvalds 
26572ba5bbedSAl Viro 		if (!iov_iter_count(to))
26581da177e4SLinus Torvalds 			break;
26596e58e79dSAl Viro 		if (ret < nr) {
26606e58e79dSAl Viro 			error = -EFAULT;
26616e58e79dSAl Viro 			break;
26626e58e79dSAl Viro 		}
26631da177e4SLinus Torvalds 		cond_resched();
26641da177e4SLinus Torvalds 	}
26651da177e4SLinus Torvalds 
266609cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26676e58e79dSAl Viro 	file_accessed(file);
26686e58e79dSAl Viro 	return retval ? retval : error;
26691da177e4SLinus Torvalds }
26701da177e4SLinus Torvalds 
2671965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2672220f2ac9SHugh Dickins {
2673220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2674220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2675220f2ac9SHugh Dickins 
2676965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2677965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2678220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
267941139aa4SMatthew Wilcox (Oracle) 	if (offset < 0)
268041139aa4SMatthew Wilcox (Oracle) 		return -ENXIO;
268141139aa4SMatthew Wilcox (Oracle) 
26825955102cSAl Viro 	inode_lock(inode);
26839608703eSJan Kara 	/* We're holding i_rwsem so we can access i_size directly */
268441139aa4SMatthew Wilcox (Oracle) 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2685387aae6fSHugh Dickins 	if (offset >= 0)
268646a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26875955102cSAl Viro 	inode_unlock(inode);
2688220f2ac9SHugh Dickins 	return offset;
2689220f2ac9SHugh Dickins }
2690220f2ac9SHugh Dickins 
269183e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
269283e4fa9cSHugh Dickins 							 loff_t len)
269383e4fa9cSHugh Dickins {
2694496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2695e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
269640e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26971aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2698d144bf62SHugh Dickins 	pgoff_t start, index, end, undo_fallocend;
2699e2d12e22SHugh Dickins 	int error;
270083e4fa9cSHugh Dickins 
270113ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
270213ace4d0SHugh Dickins 		return -EOPNOTSUPP;
270313ace4d0SHugh Dickins 
27045955102cSAl Viro 	inode_lock(inode);
270583e4fa9cSHugh Dickins 
270683e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
270783e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
270883e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
270983e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
27108e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
271183e4fa9cSHugh Dickins 
27129608703eSJan Kara 		/* protected by i_rwsem */
2713ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
271440e041a2SDavid Herrmann 			error = -EPERM;
271540e041a2SDavid Herrmann 			goto out;
271640e041a2SDavid Herrmann 		}
271740e041a2SDavid Herrmann 
27188e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2719aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2720f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2721f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2722f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2723f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2724f00cdc6dSHugh Dickins 
272583e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
272683e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
272783e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
272883e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
272983e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
27308e205f77SHugh Dickins 
27318e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
27328e205f77SHugh Dickins 		inode->i_private = NULL;
27338e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
27342055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
27358e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
273683e4fa9cSHugh Dickins 		error = 0;
27378e205f77SHugh Dickins 		goto out;
273883e4fa9cSHugh Dickins 	}
273983e4fa9cSHugh Dickins 
2740e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2741e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2742e2d12e22SHugh Dickins 	if (error)
2743e2d12e22SHugh Dickins 		goto out;
2744e2d12e22SHugh Dickins 
274540e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
274640e041a2SDavid Herrmann 		error = -EPERM;
274740e041a2SDavid Herrmann 		goto out;
274840e041a2SDavid Herrmann 	}
274940e041a2SDavid Herrmann 
275009cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
275109cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2752e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2753e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2754e2d12e22SHugh Dickins 		error = -ENOSPC;
2755e2d12e22SHugh Dickins 		goto out;
2756e2d12e22SHugh Dickins 	}
2757e2d12e22SHugh Dickins 
27588e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27591aac1400SHugh Dickins 	shmem_falloc.start = start;
27601aac1400SHugh Dickins 	shmem_falloc.next  = start;
27611aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27621aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27631aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27641aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27651aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27661aac1400SHugh Dickins 
2767d144bf62SHugh Dickins 	/*
2768d144bf62SHugh Dickins 	 * info->fallocend is only relevant when huge pages might be
2769d144bf62SHugh Dickins 	 * involved: to prevent split_huge_page() freeing fallocated
2770d144bf62SHugh Dickins 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
2771d144bf62SHugh Dickins 	 */
2772d144bf62SHugh Dickins 	undo_fallocend = info->fallocend;
2773d144bf62SHugh Dickins 	if (info->fallocend < end)
2774d144bf62SHugh Dickins 		info->fallocend = end;
2775d144bf62SHugh Dickins 
2776050dcb5cSHugh Dickins 	for (index = start; index < end; ) {
2777b0802b22SMatthew Wilcox (Oracle) 		struct folio *folio;
2778e2d12e22SHugh Dickins 
2779e2d12e22SHugh Dickins 		/*
2780e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2781e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2782e2d12e22SHugh Dickins 		 */
2783e2d12e22SHugh Dickins 		if (signal_pending(current))
2784e2d12e22SHugh Dickins 			error = -EINTR;
27851aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27861aac1400SHugh Dickins 			error = -ENOMEM;
2787e2d12e22SHugh Dickins 		else
2788b0802b22SMatthew Wilcox (Oracle) 			error = shmem_get_folio(inode, index, &folio,
2789b0802b22SMatthew Wilcox (Oracle) 						SGP_FALLOC);
2790e2d12e22SHugh Dickins 		if (error) {
2791d144bf62SHugh Dickins 			info->fallocend = undo_fallocend;
2792b0802b22SMatthew Wilcox (Oracle) 			/* Remove the !uptodate folios we added */
27937f556567SHugh Dickins 			if (index > start) {
27941635f6a7SHugh Dickins 				shmem_undo_range(inode,
279509cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2796b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27977f556567SHugh Dickins 			}
27981aac1400SHugh Dickins 			goto undone;
2799e2d12e22SHugh Dickins 		}
2800e2d12e22SHugh Dickins 
2801050dcb5cSHugh Dickins 		/*
2802050dcb5cSHugh Dickins 		 * Here is a more important optimization than it appears:
2803b0802b22SMatthew Wilcox (Oracle) 		 * a second SGP_FALLOC on the same large folio will clear it,
2804b0802b22SMatthew Wilcox (Oracle) 		 * making it uptodate and un-undoable if we fail later.
2805050dcb5cSHugh Dickins 		 */
2806b0802b22SMatthew Wilcox (Oracle) 		index = folio_next_index(folio);
2807050dcb5cSHugh Dickins 		/* Beware 32-bit wraparound */
2808050dcb5cSHugh Dickins 		if (!index)
2809050dcb5cSHugh Dickins 			index--;
2810050dcb5cSHugh Dickins 
2811e2d12e22SHugh Dickins 		/*
28121aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
28131aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
28141aac1400SHugh Dickins 		 */
2815b0802b22SMatthew Wilcox (Oracle) 		if (!folio_test_uptodate(folio))
2816050dcb5cSHugh Dickins 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
2817050dcb5cSHugh Dickins 		shmem_falloc.next = index;
28181aac1400SHugh Dickins 
28191aac1400SHugh Dickins 		/*
2820b0802b22SMatthew Wilcox (Oracle) 		 * If !uptodate, leave it that way so that freeable folios
28211635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
2822b0802b22SMatthew Wilcox (Oracle) 		 * But mark it dirty so that memory pressure will swap rather
2823b0802b22SMatthew Wilcox (Oracle) 		 * than free the folios we are allocating (and SGP_CACHE folios
2824e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2825e2d12e22SHugh Dickins 		 */
2826b0802b22SMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
2827b0802b22SMatthew Wilcox (Oracle) 		folio_unlock(folio);
2828b0802b22SMatthew Wilcox (Oracle) 		folio_put(folio);
2829e2d12e22SHugh Dickins 		cond_resched();
2830e2d12e22SHugh Dickins 	}
2831e2d12e22SHugh Dickins 
2832e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2833e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
28341aac1400SHugh Dickins undone:
28351aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28361aac1400SHugh Dickins 	inode->i_private = NULL;
28371aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2838e2d12e22SHugh Dickins out:
283915f242bbSHugh Dickins 	if (!error)
284015f242bbSHugh Dickins 		file_modified(file);
28415955102cSAl Viro 	inode_unlock(inode);
284283e4fa9cSHugh Dickins 	return error;
284383e4fa9cSHugh Dickins }
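
/*
 * Range arithmetic above, worked through (illustrative, 4K pages):
 * fallocate(fd, 0, 5000, 3000) gives start = 5000 >> 12 = 1 and
 * end = (8000 + 4095) >> 12 = 2, so exactly page index 1 is allocated.
 * For PUNCH_HOLE the partial ends are instead rounded inward with
 * round_up()/round_down(), so only whole pages are unmapped, while
 * shmem_truncate_range() zeroes the partial pages at either end.
 */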
284483e4fa9cSHugh Dickins 
2845726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
28461da177e4SLinus Torvalds {
2847726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
28481da177e4SLinus Torvalds 
28491da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
285009cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28511da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28520edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28531da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
285441ffe5d5SHugh Dickins 		buf->f_bavail =
285541ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
285641ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
28570edd73b3SHugh Dickins 	}
28580edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
28591da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
28601da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28611da177e4SLinus Torvalds 	}
28621da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
286359cda49eSAmir Goldstein 
286459cda49eSAmir Goldstein 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
286559cda49eSAmir Goldstein 
28661da177e4SLinus Torvalds 	return 0;
28671da177e4SLinus Torvalds }
28681da177e4SLinus Torvalds 
28691da177e4SLinus Torvalds /*
28701da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done.
28711da177e4SLinus Torvalds  */
28721da177e4SLinus Torvalds static int
2873549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2874549c7297SChristian Brauner 	    struct dentry *dentry, umode_t mode, dev_t dev)
28751da177e4SLinus Torvalds {
28760b0a0806SHugh Dickins 	struct inode *inode;
28771da177e4SLinus Torvalds 	int error = -ENOSPC;
28781da177e4SLinus Torvalds 
2879454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
28801da177e4SLinus Torvalds 	if (inode) {
2881feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2882feda821eSChristoph Hellwig 		if (error)
2883feda821eSChristoph Hellwig 			goto out_iput;
28842a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28859d8f13baSMimi Zohar 						     &dentry->d_name,
28866d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2887feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2888feda821eSChristoph Hellwig 			goto out_iput;
288937ec43cdSMimi Zohar 
2890718deb6bSAl Viro 		error = 0;
28911da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2892078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28931da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28941da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28951da177e4SLinus Torvalds 	}
28961da177e4SLinus Torvalds 	return error;
2897feda821eSChristoph Hellwig out_iput:
2898feda821eSChristoph Hellwig 	iput(inode);
2899feda821eSChristoph Hellwig 	return error;
29001da177e4SLinus Torvalds }
29011da177e4SLinus Torvalds 
290260545d0dSAl Viro static int
2903549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2904549c7297SChristian Brauner 	      struct dentry *dentry, umode_t mode)
290560545d0dSAl Viro {
290660545d0dSAl Viro 	struct inode *inode;
290760545d0dSAl Viro 	int error = -ENOSPC;
290860545d0dSAl Viro 
290960545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
291060545d0dSAl Viro 	if (inode) {
291160545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
291260545d0dSAl Viro 						     NULL,
291360545d0dSAl Viro 						     shmem_initxattrs, NULL);
2914feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2915feda821eSChristoph Hellwig 			goto out_iput;
2916feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2917feda821eSChristoph Hellwig 		if (error)
2918feda821eSChristoph Hellwig 			goto out_iput;
291960545d0dSAl Viro 		d_tmpfile(dentry, inode);
292060545d0dSAl Viro 	}
292160545d0dSAl Viro 	return error;
2922feda821eSChristoph Hellwig out_iput:
2923feda821eSChristoph Hellwig 	iput(inode);
2924feda821eSChristoph Hellwig 	return error;
292560545d0dSAl Viro }
292660545d0dSAl Viro 
2927549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2928549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
29291da177e4SLinus Torvalds {
29301da177e4SLinus Torvalds 	int error;
29311da177e4SLinus Torvalds 
2932549c7297SChristian Brauner 	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2933549c7297SChristian Brauner 				 mode | S_IFDIR, 0)))
29341da177e4SLinus Torvalds 		return error;
2935d8c76e6fSDave Hansen 	inc_nlink(dir);
29361da177e4SLinus Torvalds 	return 0;
29371da177e4SLinus Torvalds }
29381da177e4SLinus Torvalds 
2939549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2940549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
29411da177e4SLinus Torvalds {
2942549c7297SChristian Brauner 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
29431da177e4SLinus Torvalds }
29441da177e4SLinus Torvalds 
29451da177e4SLinus Torvalds /*
29461da177e4SLinus Torvalds  * Link a file.
29471da177e4SLinus Torvalds  */
29481da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
29491da177e4SLinus Torvalds {
295075c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
295129b00e60SDarrick J. Wong 	int ret = 0;
29521da177e4SLinus Torvalds 
29531da177e4SLinus Torvalds 	/*
29541da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
29551da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29561da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29571062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29581062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29591da177e4SLinus Torvalds 	 */
29601062af92SDarrick J. Wong 	if (inode->i_nlink) {
2961e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
29625b04c689SPavel Emelyanov 		if (ret)
29635b04c689SPavel Emelyanov 			goto out;
29641062af92SDarrick J. Wong 	}
29651da177e4SLinus Torvalds 
29661da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2967078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2968d8c76e6fSDave Hansen 	inc_nlink(inode);
29697de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29701da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29711da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29725b04c689SPavel Emelyanov out:
29735b04c689SPavel Emelyanov 	return ret;
29741da177e4SLinus Torvalds }
29751da177e4SLinus Torvalds 
29761da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
29771da177e4SLinus Torvalds {
297875c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
29791da177e4SLinus Torvalds 
29805b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
29815b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
29821da177e4SLinus Torvalds 
29831da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2984078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
29859a53c3a7SDave Hansen 	drop_nlink(inode);
29861da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
29871da177e4SLinus Torvalds 	return 0;
29881da177e4SLinus Torvalds }
29891da177e4SLinus Torvalds 
29901da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
29911da177e4SLinus Torvalds {
29921da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29931da177e4SLinus Torvalds 		return -ENOTEMPTY;
29941da177e4SLinus Torvalds 
299575c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29969a53c3a7SDave Hansen 	drop_nlink(dir);
29971da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29981da177e4SLinus Torvalds }
29991da177e4SLinus Torvalds 
3000549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns,
3001549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
300246fdb794SMiklos Szeredi {
300346fdb794SMiklos Szeredi 	struct dentry *whiteout;
300446fdb794SMiklos Szeredi 	int error;
300546fdb794SMiklos Szeredi 
300646fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
300746fdb794SMiklos Szeredi 	if (!whiteout)
300846fdb794SMiklos Szeredi 		return -ENOMEM;
300946fdb794SMiklos Szeredi 
3010549c7297SChristian Brauner 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
301146fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
301246fdb794SMiklos Szeredi 	dput(whiteout);
301346fdb794SMiklos Szeredi 	if (error)
301446fdb794SMiklos Szeredi 		return error;
301546fdb794SMiklos Szeredi 
301646fdb794SMiklos Szeredi 	/*
301746fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
301846fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
301946fdb794SMiklos Szeredi 	 *
302046fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
302146fdb794SMiklos Szeredi 	 * which of the two is unpredictable, but that does not matter.
302246fdb794SMiklos Szeredi 	 */
302346fdb794SMiklos Szeredi 	d_rehash(whiteout);
302446fdb794SMiklos Szeredi 	return 0;
302546fdb794SMiklos Szeredi }
302646fdb794SMiklos Szeredi 
30271da177e4SLinus Torvalds /*
30281da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
30291da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30301da177e4SLinus Torvalds  * it exists so that the VFS layer correctly frees it when it
30311da177e4SLinus Torvalds  * gets overwritten.
30321da177e4SLinus Torvalds  */
3033549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns,
3034549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
3035549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
3036549c7297SChristian Brauner 			 unsigned int flags)
30371da177e4SLinus Torvalds {
303875c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30391da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30401da177e4SLinus Torvalds 
304146fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30423b69ff51SMiklos Szeredi 		return -EINVAL;
30433b69ff51SMiklos Szeredi 
304437456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
30456429e463SLorenz Bauer 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
304637456771SMiklos Szeredi 
30471da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
30481da177e4SLinus Torvalds 		return -ENOTEMPTY;
30491da177e4SLinus Torvalds 
305046fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
305146fdb794SMiklos Szeredi 		int error;
305246fdb794SMiklos Szeredi 
3053549c7297SChristian Brauner 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
305446fdb794SMiklos Szeredi 		if (error)
305546fdb794SMiklos Szeredi 			return error;
305646fdb794SMiklos Szeredi 	}
305746fdb794SMiklos Szeredi 
305875c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
30591da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3060b928095bSMiklos Szeredi 		if (they_are_dirs) {
306175c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
30629a53c3a7SDave Hansen 			drop_nlink(old_dir);
3063b928095bSMiklos Szeredi 		}
30641da177e4SLinus Torvalds 	} else if (they_are_dirs) {
30659a53c3a7SDave Hansen 		drop_nlink(old_dir);
3066d8c76e6fSDave Hansen 		inc_nlink(new_dir);
30671da177e4SLinus Torvalds 	}
30681da177e4SLinus Torvalds 
30691da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
30701da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
30711da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
30721da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3073078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30741da177e4SLinus Torvalds 	return 0;
30751da177e4SLinus Torvalds }
30761da177e4SLinus Torvalds 
3077549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3078549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
30791da177e4SLinus Torvalds {
30801da177e4SLinus Torvalds 	int error;
30811da177e4SLinus Torvalds 	int len;
30821da177e4SLinus Torvalds 	struct inode *inode;
30837ad0414bSMatthew Wilcox (Oracle) 	struct folio *folio;
30841da177e4SLinus Torvalds 
30851da177e4SLinus Torvalds 	len = strlen(symname) + 1;
308609cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30871da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30881da177e4SLinus Torvalds 
30890825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30900825a6f9SJoe Perches 				VM_NORESERVE);
30911da177e4SLinus Torvalds 	if (!inode)
30921da177e4SLinus Torvalds 		return -ENOSPC;
30931da177e4SLinus Torvalds 
30949d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30956d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3096343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3097570bc1c2SStephen Smalley 		iput(inode);
3098570bc1c2SStephen Smalley 		return error;
3099570bc1c2SStephen Smalley 	}
3100570bc1c2SStephen Smalley 
31011da177e4SLinus Torvalds 	inode->i_size = len-1;
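	/*
	 * Short targets are duplicated inline into ->i_link below (and
	 * freed in shmem_free_in_core_inode()); longer ones are written
	 * into page 0 of the mapping and read back via shmem_get_link().
	 */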
310269f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
31033ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
31043ed47db3SAl Viro 		if (!inode->i_link) {
310569f07ec9SHugh Dickins 			iput(inode);
310669f07ec9SHugh Dickins 			return -ENOMEM;
310769f07ec9SHugh Dickins 		}
310869f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
31091da177e4SLinus Torvalds 	} else {
3110e8ecde25SAl Viro 		inode_nohighmem(inode);
31117ad0414bSMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
31121da177e4SLinus Torvalds 		if (error) {
31131da177e4SLinus Torvalds 			iput(inode);
31141da177e4SLinus Torvalds 			return error;
31151da177e4SLinus Torvalds 		}
311614fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
31171da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
31187ad0414bSMatthew Wilcox (Oracle) 		memcpy(folio_address(folio), symname, len);
31197ad0414bSMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
31207ad0414bSMatthew Wilcox (Oracle) 		folio_mark_dirty(folio);
31217ad0414bSMatthew Wilcox (Oracle) 		folio_unlock(folio);
31227ad0414bSMatthew Wilcox (Oracle) 		folio_put(folio);
31231da177e4SLinus Torvalds 	}
31241da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3125078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
31261da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31271da177e4SLinus Torvalds 	dget(dentry);
31281da177e4SLinus Torvalds 	return 0;
31291da177e4SLinus Torvalds }
31301da177e4SLinus Torvalds 
3131fceef393SAl Viro static void shmem_put_link(void *arg)
3132fceef393SAl Viro {
3133e4b57722SMatthew Wilcox (Oracle) 	folio_mark_accessed(arg);
3134e4b57722SMatthew Wilcox (Oracle) 	folio_put(arg);
3135fceef393SAl Viro }
3136fceef393SAl Viro 
31376b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3138fceef393SAl Viro 				  struct inode *inode,
3139fceef393SAl Viro 				  struct delayed_call *done)
31401da177e4SLinus Torvalds {
3141e4b57722SMatthew Wilcox (Oracle) 	struct folio *folio = NULL;
31426b255391SAl Viro 	int error;
3143e4b57722SMatthew Wilcox (Oracle) 
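	/*
	 * A NULL dentry means RCU-walk mode: we must not sleep, so only
	 * peek at the page cache and return -ECHILD if the symlink body
	 * is not immediately usable; the VFS then retries in ref-walk
	 * mode and calls us again with a dentry.
	 */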
31446a6c9904SAl Viro 	if (!dentry) {
3145e4b57722SMatthew Wilcox (Oracle) 		folio = filemap_get_folio(inode->i_mapping, 0);
3146e4b57722SMatthew Wilcox (Oracle) 		if (!folio)
31476b255391SAl Viro 			return ERR_PTR(-ECHILD);
31487459c149SMatthew Wilcox (Oracle) 		if (PageHWPoison(folio_page(folio, 0)) ||
3149e4b57722SMatthew Wilcox (Oracle) 		    !folio_test_uptodate(folio)) {
3150e4b57722SMatthew Wilcox (Oracle) 			folio_put(folio);
31516a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31526a6c9904SAl Viro 		}
31536a6c9904SAl Viro 	} else {
3154e4b57722SMatthew Wilcox (Oracle) 		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
3155680baacbSAl Viro 		if (error)
3156680baacbSAl Viro 			return ERR_PTR(error);
3157e4b57722SMatthew Wilcox (Oracle) 		if (!folio)
3158a7605426SYang Shi 			return ERR_PTR(-ECHILD);
31597459c149SMatthew Wilcox (Oracle) 		if (PageHWPoison(folio_page(folio, 0))) {
3160e4b57722SMatthew Wilcox (Oracle) 			folio_unlock(folio);
3161e4b57722SMatthew Wilcox (Oracle) 			folio_put(folio);
3162a7605426SYang Shi 			return ERR_PTR(-ECHILD);
3163a7605426SYang Shi 		}
3164e4b57722SMatthew Wilcox (Oracle) 		folio_unlock(folio);
31651da177e4SLinus Torvalds 	}
3166e4b57722SMatthew Wilcox (Oracle) 	set_delayed_call(done, shmem_put_link, folio);
3167e4b57722SMatthew Wilcox (Oracle) 	return folio_address(folio);
31681da177e4SLinus Torvalds }
31691da177e4SLinus Torvalds 
3170b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3171e408e695STheodore Ts'o 
3172e408e695STheodore Ts'o static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3173e408e695STheodore Ts'o {
3174e408e695STheodore Ts'o 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3175e408e695STheodore Ts'o 
3176e408e695STheodore Ts'o 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3177e408e695STheodore Ts'o 
3178e408e695STheodore Ts'o 	return 0;
3179e408e695STheodore Ts'o }
3180e408e695STheodore Ts'o 
3181e408e695STheodore Ts'o static int shmem_fileattr_set(struct user_namespace *mnt_userns,
3182e408e695STheodore Ts'o 			      struct dentry *dentry, struct fileattr *fa)
3183e408e695STheodore Ts'o {
3184e408e695STheodore Ts'o 	struct inode *inode = d_inode(dentry);
3185e408e695STheodore Ts'o 	struct shmem_inode_info *info = SHMEM_I(inode);
3186e408e695STheodore Ts'o 
3187e408e695STheodore Ts'o 	if (fileattr_has_fsx(fa))
3188e408e695STheodore Ts'o 		return -EOPNOTSUPP;
3189cb241339SHugh Dickins 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3190cb241339SHugh Dickins 		return -EOPNOTSUPP;
3191e408e695STheodore Ts'o 
3192e408e695STheodore Ts'o 	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3193e408e695STheodore Ts'o 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
3194e408e695STheodore Ts'o 
3195cb241339SHugh Dickins 	shmem_set_inode_flags(inode, info->fsflags);
3196e408e695STheodore Ts'o 	inode->i_ctime = current_time(inode);
3197e408e695STheodore Ts'o 	return 0;
3198e408e695STheodore Ts'o }
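/*
 * Illustration (not part of mm/shmem.c): these two handlers back the
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls (lsattr/chattr) on tmpfs, with
 * only the SHMEM_FL_USER_MODIFIABLE subset accepted. A hedged userspace
 * sketch setting the "no dump" flag:
 */
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_*FLAGS, FS_NODUMP_FL */

/* Ends up in shmem_fileattr_set() via the VFS fileattr glue. */
static int set_nodump(int fd)
{
	int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags |= FS_NODUMP_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}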
3199e408e695STheodore Ts'o 
3200b09e0fa4SEric Paris /*
3201b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3202b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3203b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3204b09e0fa4SEric Paris  * filesystem level, though.
3205b09e0fa4SEric Paris  */
3206b09e0fa4SEric Paris 
32076d9d88d0SJarkko Sakkinen /*
32086d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
32096d9d88d0SJarkko Sakkinen  */
32106d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
32116d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
32126d9d88d0SJarkko Sakkinen 			    void *fs_info)
32136d9d88d0SJarkko Sakkinen {
32146d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
32156d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
321638f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
32176d9d88d0SJarkko Sakkinen 	size_t len;
32186d9d88d0SJarkko Sakkinen 
32196d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
322038f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
32216d9d88d0SJarkko Sakkinen 		if (!new_xattr)
32226d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32236d9d88d0SJarkko Sakkinen 
32246d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
32256d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
32266d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
32276d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
32283bef735aSChengguang Xu 			kvfree(new_xattr);
32296d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32306d9d88d0SJarkko Sakkinen 		}
32316d9d88d0SJarkko Sakkinen 
32326d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
32336d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
32346d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
32356d9d88d0SJarkko Sakkinen 		       xattr->name, len);
32366d9d88d0SJarkko Sakkinen 
323738f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
32386d9d88d0SJarkko Sakkinen 	}
32396d9d88d0SJarkko Sakkinen 
32406d9d88d0SJarkko Sakkinen 	return 0;
32416d9d88d0SJarkko Sakkinen }
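/*
 * Worked example (illustrative): with SELinux active,
 * security_inode_init_security() typically hands this callback a single
 * xattr named "selinux", and the concatenation above produces the full
 * name "security.selinux" (XATTR_SECURITY_PREFIX is "security."), which
 * userspace can read back; the path below is hypothetical:
 */
#include <sys/types.h>
#include <sys/xattr.h>

static void read_label(void)
{
	char label[256];
	ssize_t n = getxattr("/dev/shm/somefile", "security.selinux",
			     label, sizeof(label) - 1);

	if (n >= 0)
		label[n] = '\0';	/* e.g. "system_u:object_r:tmpfs_t:s0" */
}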
32426d9d88d0SJarkko Sakkinen 
3243aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3244b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3245b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3246aa7c5241SAndreas Gruenbacher {
3247b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3248aa7c5241SAndreas Gruenbacher 
3249aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3250aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3251aa7c5241SAndreas Gruenbacher }
3252aa7c5241SAndreas Gruenbacher 
3253aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3254e65ce2a5SChristian Brauner 				   struct user_namespace *mnt_userns,
325559301226SAl Viro 				   struct dentry *unused, struct inode *inode,
325659301226SAl Viro 				   const char *name, const void *value,
325759301226SAl Viro 				   size_t size, int flags)
3258aa7c5241SAndreas Gruenbacher {
325959301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3260aa7c5241SAndreas Gruenbacher 
3261aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3262a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3263aa7c5241SAndreas Gruenbacher }
3264aa7c5241SAndreas Gruenbacher 
3265aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3266aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3267aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3268aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3269aa7c5241SAndreas Gruenbacher };
3270aa7c5241SAndreas Gruenbacher 
3271aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3272aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3273aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3274aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3275aa7c5241SAndreas Gruenbacher };
3276aa7c5241SAndreas Gruenbacher 
3277b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3278b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3279feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3280feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3281b09e0fa4SEric Paris #endif
3282aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3283aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3284b09e0fa4SEric Paris 	NULL
3285b09e0fa4SEric Paris };
3286b09e0fa4SEric Paris 
3287b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3288b09e0fa4SEric Paris {
328975c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3290786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3291b09e0fa4SEric Paris }
3292b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3293b09e0fa4SEric Paris 
329469f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
3295f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
32966b255391SAl Viro 	.get_link	= simple_get_link,
3297b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3298b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3299b09e0fa4SEric Paris #endif
33001da177e4SLinus Torvalds };
33011da177e4SLinus Torvalds 
330292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
3303f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
33046b255391SAl Viro 	.get_link	= shmem_get_link,
3305b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3306b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
330739f0247dSAndreas Gruenbacher #endif
3308b09e0fa4SEric Paris };
330939f0247dSAndreas Gruenbacher 
331091828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
331191828a40SDavid M. Grimes {
331291828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
331391828a40SDavid M. Grimes }
331491828a40SDavid M. Grimes 
331591828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
331691828a40SDavid M. Grimes {
331791828a40SDavid M. Grimes 	__u32 *fh = vfh;
331891828a40SDavid M. Grimes 	__u64 inum = fh[2];
331991828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
332091828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
332191828a40SDavid M. Grimes }
332291828a40SDavid M. Grimes 
332312ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
332412ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
332512ba780dSAmir Goldstein {
332612ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
332712ba780dSAmir Goldstein 
332812ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
332912ba780dSAmir Goldstein }
333012ba780dSAmir Goldstein 
333112ba780dSAmir Goldstein 
3332480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3333480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
333491828a40SDavid M. Grimes {
333591828a40SDavid M. Grimes 	struct inode *inode;
3336480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
333735c2a7f4SHugh Dickins 	u64 inum;
333891828a40SDavid M. Grimes 
3339480b116cSChristoph Hellwig 	if (fh_len < 3)
3340480b116cSChristoph Hellwig 		return NULL;
3341480b116cSChristoph Hellwig 
334235c2a7f4SHugh Dickins 	inum = fid->raw[2];
334335c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
334435c2a7f4SHugh Dickins 
3345480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3346480b116cSChristoph Hellwig 			shmem_match, fid->raw);
334791828a40SDavid M. Grimes 	if (inode) {
334812ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
334991828a40SDavid M. Grimes 		iput(inode);
335091828a40SDavid M. Grimes 	}
335191828a40SDavid M. Grimes 
3352480b116cSChristoph Hellwig 	return dentry;
335391828a40SDavid M. Grimes }
335491828a40SDavid M. Grimes 
3355b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3356b0b0382bSAl Viro 				struct inode *parent)
335791828a40SDavid M. Grimes {
33585fe0c237SAneesh Kumar K.V 	if (*len < 3) {
33595fe0c237SAneesh Kumar K.V 		*len = 3;
336094e07a75SNamjae Jeon 		return FILEID_INVALID;
33615fe0c237SAneesh Kumar K.V 	}
336291828a40SDavid M. Grimes 
33631d3382cbSAl Viro 	if (inode_unhashed(inode)) {
336491828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
336591828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
336691828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
336791828a40SDavid M. Grimes 		 * to do it once
336891828a40SDavid M. Grimes 		 */
336991828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
337091828a40SDavid M. Grimes 		spin_lock(&lock);
33711d3382cbSAl Viro 		if (inode_unhashed(inode))
337291828a40SDavid M. Grimes 			__insert_inode_hash(inode,
337391828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
337491828a40SDavid M. Grimes 		spin_unlock(&lock);
337591828a40SDavid M. Grimes 	}
337691828a40SDavid M. Grimes 
337791828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
337891828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
337991828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
338091828a40SDavid M. Grimes 
338191828a40SDavid M. Grimes 	*len = 3;
338291828a40SDavid M. Grimes 	return 1;
338391828a40SDavid M. Grimes }
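/*
 * The resulting handle layout is { fh[0] = i_generation, fh[1] = low 32
 * bits of i_ino, fh[2] = high 32 bits }; shmem_match() reassembles
 * inum = (fh[2] << 32) | fh[1]. A hedged userspace sketch fetching such
 * a handle with name_to_handle_at(2) (error handling mostly elided):
 */
#define _GNU_SOURCE
#include <fcntl.h>	/* name_to_handle_at(), struct file_handle */
#include <stdint.h>
#include <stdlib.h>

static uint64_t handle_ino(const char *path)
{
	struct file_handle *fh = malloc(sizeof(*fh) + 3 * sizeof(uint32_t));
	uint64_t ino = 0;
	int mount_id;

	fh->handle_bytes = 3 * sizeof(uint32_t);
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == 0) {
		uint32_t *raw = (uint32_t *)fh->f_handle;
		/* raw[0] is i_generation */
		ino = ((uint64_t)raw[2] << 32) | raw[1];
	}
	free(fh);
	return ino;
}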
338491828a40SDavid M. Grimes 
338539655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
338691828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
338791828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3388480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
338991828a40SDavid M. Grimes };
339091828a40SDavid M. Grimes 
3391626c3920SAl Viro enum shmem_param {
3392626c3920SAl Viro 	Opt_gid,
3393626c3920SAl Viro 	Opt_huge,
3394626c3920SAl Viro 	Opt_mode,
3395626c3920SAl Viro 	Opt_mpol,
3396626c3920SAl Viro 	Opt_nr_blocks,
3397626c3920SAl Viro 	Opt_nr_inodes,
3398626c3920SAl Viro 	Opt_size,
3399626c3920SAl Viro 	Opt_uid,
3400ea3271f7SChris Down 	Opt_inode32,
3401ea3271f7SChris Down 	Opt_inode64,
3402626c3920SAl Viro };
34031da177e4SLinus Torvalds 
34045eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
34052710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
34062710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
34072710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
34082710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
34092710c957SAl Viro 	{}
34102710c957SAl Viro };
34112710c957SAl Viro 
3412d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3413626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
34142710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3415626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3416626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3417626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3418626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3419626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3420626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3421ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3422ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
3423626c3920SAl Viro 	{}
3424626c3920SAl Viro };
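/*
 * These are the mount options accepted at mount time and on remount. A
 * sketch using the raw mount(2) syscall (the mount point is assumed, and
 * mpol needs a NUMA kernel); "size=50%" is scaled against totalram_pages()
 * by the Opt_size case below:
 */
#include <sys/mount.h>

static int demo_mount(void)
{
	/* The data string is tokenized by shmem_parse_options() and each
	 * token is fed to shmem_parse_one() via vfs_parse_fs_string(). */
	return mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		     "size=50%,inode64,huge=within_size,mpol=bind:0,2");
}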
3425626c3920SAl Viro 
3426f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3427626c3920SAl Viro {
3428f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3429626c3920SAl Viro 	struct fs_parse_result result;
3430e04dc423SAl Viro 	unsigned long long size;
3431626c3920SAl Viro 	char *rest;
3432626c3920SAl Viro 	int opt;
3433626c3920SAl Viro 
3434d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3435f3235626SDavid Howells 	if (opt < 0)
3436626c3920SAl Viro 		return opt;
3437626c3920SAl Viro 
3438626c3920SAl Viro 	switch (opt) {
3439626c3920SAl Viro 	case Opt_size:
3440626c3920SAl Viro 		size = memparse(param->string, &rest);
3441e04dc423SAl Viro 		if (*rest == '%') {
3442e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3443e04dc423SAl Viro 			size *= totalram_pages();
3444e04dc423SAl Viro 			do_div(size, 100);
3445e04dc423SAl Viro 			rest++;
3446e04dc423SAl Viro 		}
3447e04dc423SAl Viro 		if (*rest)
3448626c3920SAl Viro 			goto bad_value;
3449e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3450e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3451626c3920SAl Viro 		break;
3452626c3920SAl Viro 	case Opt_nr_blocks:
3453626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
34540c98c8e1SZhaoLong Wang 		if (*rest || ctx->blocks > S64_MAX)
3455626c3920SAl Viro 			goto bad_value;
3456e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3457626c3920SAl Viro 		break;
3458626c3920SAl Viro 	case Opt_nr_inodes:
3459626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3460e04dc423SAl Viro 		if (*rest)
3461626c3920SAl Viro 			goto bad_value;
3462e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3463626c3920SAl Viro 		break;
3464626c3920SAl Viro 	case Opt_mode:
3465626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3466626c3920SAl Viro 		break;
3467626c3920SAl Viro 	case Opt_uid:
3468626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3469e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3470626c3920SAl Viro 			goto bad_value;
3471626c3920SAl Viro 		break;
3472626c3920SAl Viro 	case Opt_gid:
3473626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3474e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3475626c3920SAl Viro 			goto bad_value;
3476626c3920SAl Viro 		break;
3477626c3920SAl Viro 	case Opt_huge:
3478626c3920SAl Viro 		ctx->huge = result.uint_32;
3479626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3480396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3481626c3920SAl Viro 		      has_transparent_hugepage()))
3482626c3920SAl Viro 			goto unsupported_parameter;
3483e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3484626c3920SAl Viro 		break;
3485626c3920SAl Viro 	case Opt_mpol:
3486626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3487e04dc423SAl Viro 			mpol_put(ctx->mpol);
3488e04dc423SAl Viro 			ctx->mpol = NULL;
3489626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3490626c3920SAl Viro 				goto bad_value;
3491626c3920SAl Viro 			break;
3492626c3920SAl Viro 		}
3493626c3920SAl Viro 		goto unsupported_parameter;
3494ea3271f7SChris Down 	case Opt_inode32:
3495ea3271f7SChris Down 		ctx->full_inums = false;
3496ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3497ea3271f7SChris Down 		break;
3498ea3271f7SChris Down 	case Opt_inode64:
3499ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3500ea3271f7SChris Down 			return invalfc(fc,
3501ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel\n");
3502ea3271f7SChris Down 		}
3503ea3271f7SChris Down 		ctx->full_inums = true;
3504ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3505ea3271f7SChris Down 		break;
3506e04dc423SAl Viro 	}
3507e04dc423SAl Viro 	return 0;
3508e04dc423SAl Viro 
3509626c3920SAl Viro unsupported_parameter:
3510f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3511626c3920SAl Viro bad_value:
3512f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3513e04dc423SAl Viro }
3514e04dc423SAl Viro 
3515f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3516e04dc423SAl Viro {
3517f3235626SDavid Howells 	char *options = data;
3518f3235626SDavid Howells 
351933f37c64SAl Viro 	if (options) {
352033f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
352133f37c64SAl Viro 		if (err)
352233f37c64SAl Viro 			return err;
352333f37c64SAl Viro 	}
352433f37c64SAl Viro 
3525b00dc3adSHugh Dickins 	while (options != NULL) {
3526626c3920SAl Viro 		char *this_char = options;
3527b00dc3adSHugh Dickins 		for (;;) {
3528b00dc3adSHugh Dickins 			/*
3529b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3530b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3531b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3532b00dc3adSHugh Dickins 			 */
3533b00dc3adSHugh Dickins 			options = strchr(options, ',');
3534b00dc3adSHugh Dickins 			if (options == NULL)
3535b00dc3adSHugh Dickins 				break;
3536b00dc3adSHugh Dickins 			options++;
3537b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3538b00dc3adSHugh Dickins 				options[-1] = '\0';
3539b00dc3adSHugh Dickins 				break;
3540b00dc3adSHugh Dickins 			}
3541b00dc3adSHugh Dickins 		}
3542626c3920SAl Viro 		if (*this_char) {
3543626c3920SAl Viro 			char *value = strchr(this_char, '=');
3544f3235626SDavid Howells 			size_t len = 0;
3545626c3920SAl Viro 			int err;
3546626c3920SAl Viro 
3547626c3920SAl Viro 			if (value) {
3548626c3920SAl Viro 				*value++ = '\0';
3549f3235626SDavid Howells 				len = strlen(value);
35501da177e4SLinus Torvalds 			}
3551f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3552f3235626SDavid Howells 			if (err < 0)
3553f3235626SDavid Howells 				return err;
35541da177e4SLinus Torvalds 		}
3555626c3920SAl Viro 	}
35561da177e4SLinus Torvalds 	return 0;
35571da177e4SLinus Torvalds }
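/*
 * Worked example: given the data string from the mount sketch above,
 * "size=50%,inode64,huge=within_size,mpol=bind:0,2", the loop splits at
 * the commas before "inode64", "huge" and "mpol", but not at the comma
 * inside the nodelist, because the character after it ('2') is a digit;
 * shmem_parse_one() therefore receives "mpol=bind:0,2" intact.
 */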
35581da177e4SLinus Torvalds 
3559f3235626SDavid Howells /*
3560f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3561f3235626SDavid Howells  *
3562f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3563f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3564f3235626SDavid Howells  * that case we have no record of how much is already in use.
3565f3235626SDavid Howells  */
3566f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
35671da177e4SLinus Torvalds {
3568f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3569f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
35700edd73b3SHugh Dickins 	unsigned long inodes;
3571bf11b9a8SSebastian Andrzej Siewior 	struct mempolicy *mpol = NULL;
3572f3235626SDavid Howells 	const char *err;
35730edd73b3SHugh Dickins 
3574bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock(&sbinfo->stat_lock);
35750edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
35760c98c8e1SZhaoLong Wang 
3577f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3578f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3579f3235626SDavid Howells 			err = "Cannot retroactively limit size";
35800edd73b3SHugh Dickins 			goto out;
35810b5071ddSAl Viro 		}
3582f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3583f3235626SDavid Howells 					   ctx->blocks) > 0) {
3584f3235626SDavid Howells 			err = "Too small a size for current use";
35850b5071ddSAl Viro 			goto out;
3586f3235626SDavid Howells 		}
3587f3235626SDavid Howells 	}
3588f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3589f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3590f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
35910b5071ddSAl Viro 			goto out;
35920b5071ddSAl Viro 		}
3593f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3594f3235626SDavid Howells 			err = "Too few inodes for current use";
3595f3235626SDavid Howells 			goto out;
3596f3235626SDavid Howells 		}
3597f3235626SDavid Howells 	}
35980edd73b3SHugh Dickins 
3599ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3600ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3601ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3602ea3271f7SChris Down 		goto out;
3603ea3271f7SChris Down 	}
3604ea3271f7SChris Down 
3605f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3606f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3607ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3608ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3609f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3610f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3611f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3612f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3613f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
36140b5071ddSAl Viro 	}
361571fe804bSLee Schermerhorn 
36165f00110fSGreg Thelen 	/*
36175f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
36185f00110fSGreg Thelen 	 */
3619f3235626SDavid Howells 	if (ctx->mpol) {
3620bf11b9a8SSebastian Andrzej Siewior 		mpol = sbinfo->mpol;
3621f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3622f3235626SDavid Howells 		ctx->mpol = NULL;
36235f00110fSGreg Thelen 	}
3624bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3625bf11b9a8SSebastian Andrzej Siewior 	mpol_put(mpol);
3626f3235626SDavid Howells 	return 0;
36270edd73b3SHugh Dickins out:
3628bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3629f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
36301da177e4SLinus Torvalds }
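/*
 * Illustration (assumed mount point): growing a live instance goes through
 * shmem_reconfigure() via MS_REMOUNT, while imposing a limit on a
 * previously unlimited instance, or shrinking below current usage, fails
 * with the messages above.
 */
#include <sys/mount.h>

static int demo_remount(void)
{
	return mount(NULL, "/mnt/tmp", NULL, MS_REMOUNT,
		     "size=2g,nr_inodes=8192");
}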
3631680d794bSakpm@linux-foundation.org 
363234c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3633680d794bSakpm@linux-foundation.org {
363434c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3635680d794bSakpm@linux-foundation.org 
3636680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3637680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
363809cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3639680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3640680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
36410825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
364209208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
36438751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
36448751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
36458751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
36468751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
36478751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
36488751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3649ea3271f7SChris Down 
3650ea3271f7SChris Down 	/*
3651ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3652ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3653ea3271f7SChris Down 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
3654ea3271f7SChris Down 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3655ea3271f7SChris Down 	 *
3656ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3657ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3658ea3271f7SChris Down 	 * consideration.
3659ea3271f7SChris Down 	 *
3660ea3271f7SChris Down 	 * As such:
3661ea3271f7SChris Down 	 *
3662ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3663ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3664ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3665ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3666ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3667ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3668ea3271f7SChris Down 	 *
3669ea3271f7SChris Down 	 */
3670ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3671ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3672396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
36735a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
36745a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
36755a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
36765a6e75f8SKirill A. Shutemov #endif
367771fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3678680d794bSakpm@linux-foundation.org 	return 0;
3679680d794bSakpm@linux-foundation.org }
36809183df25SDavid Herrmann 
3681680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
36821da177e4SLinus Torvalds 
36831da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
36841da177e4SLinus Torvalds {
3685602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3686602586a8SHugh Dickins 
3687e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3688602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
368949cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3690602586a8SHugh Dickins 	kfree(sbinfo);
36911da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
36921da177e4SLinus Torvalds }
36931da177e4SLinus Torvalds 
3694f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
36951da177e4SLinus Torvalds {
3696f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
36971da177e4SLinus Torvalds 	struct inode *inode;
36980edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3699680d794bSakpm@linux-foundation.org 
3700680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3701425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3702680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3703680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3704680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3705680d794bSakpm@linux-foundation.org 
3706680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
37071da177e4SLinus Torvalds 
37080edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
37091da177e4SLinus Torvalds 	/*
37101da177e4SLinus Torvalds 	 * By default we only allow half of the physical RAM per
37111da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
37121da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
37131da177e4SLinus Torvalds 	 */
37141751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3715f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3716f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3717f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3718f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3719ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3720ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3721ca4e0519SAl Viro 	} else {
37221751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
37231da177e4SLinus Torvalds 	}
372491828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
37251751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
37260edd73b3SHugh Dickins #else
37271751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
37280edd73b3SHugh Dickins #endif
3729f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3730f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3731e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3732e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3733e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3734e809d5f0SChris Down 			goto failed;
3735e809d5f0SChris Down 	}
3736f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3737f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3738ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3739f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3740f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3741f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3742f3235626SDavid Howells 	ctx->mpol = NULL;
37431da177e4SLinus Torvalds 
3744bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock_init(&sbinfo->stat_lock);
3745908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3746602586a8SHugh Dickins 		goto failed;
3747779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3748779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
37491da177e4SLinus Torvalds 
3750285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
375109cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
375209cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
37531da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
37541da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3755cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3756b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
375739f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3758b09e0fa4SEric Paris #endif
3759b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
37601751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
376139f0247dSAndreas Gruenbacher #endif
37622b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
37630edd73b3SHugh Dickins 
3764454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
37651da177e4SLinus Torvalds 	if (!inode)
37661da177e4SLinus Torvalds 		goto failed;
3767680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3768680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3769318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3770318ceed0SAl Viro 	if (!sb->s_root)
377148fde701SAl Viro 		goto failed;
37721da177e4SLinus Torvalds 	return 0;
37731da177e4SLinus Torvalds 
37741da177e4SLinus Torvalds failed:
37751da177e4SLinus Torvalds 	shmem_put_super(sb);
3776f2b346e4SMiaohe Lin 	return -ENOMEM;
37771da177e4SLinus Torvalds }
37781da177e4SLinus Torvalds 
3779f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3780f3235626SDavid Howells {
3781f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3782f3235626SDavid Howells }
3783f3235626SDavid Howells 
3784f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3785f3235626SDavid Howells {
3786f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3787f3235626SDavid Howells 
3788f3235626SDavid Howells 	if (ctx) {
3789f3235626SDavid Howells 		mpol_put(ctx->mpol);
3790f3235626SDavid Howells 		kfree(ctx);
3791f3235626SDavid Howells 	}
3792f3235626SDavid Howells }
3793f3235626SDavid Howells 
3794f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3795f3235626SDavid Howells 	.free			= shmem_free_fc,
3796f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3797f3235626SDavid Howells #ifdef CONFIG_TMPFS
3798f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3799f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3800f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3801f3235626SDavid Howells #endif
3802f3235626SDavid Howells };
3803f3235626SDavid Howells 
3804fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
38051da177e4SLinus Torvalds 
38061da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
38071da177e4SLinus Torvalds {
380841ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
3809fd60b288SMuchun Song 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
381041ffe5d5SHugh Dickins 	if (!info)
38111da177e4SLinus Torvalds 		return NULL;
381241ffe5d5SHugh Dickins 	return &info->vfs_inode;
38131da177e4SLinus Torvalds }
38141da177e4SLinus Torvalds 
381574b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3816fa0d7e3dSNick Piggin {
381784e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
38183ed47db3SAl Viro 		kfree(inode->i_link);
3819fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3820fa0d7e3dSNick Piggin }
3821fa0d7e3dSNick Piggin 
38221da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
38231da177e4SLinus Torvalds {
382409208d15SAl Viro 	if (S_ISREG(inode->i_mode))
38251da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
38261da177e4SLinus Torvalds }
38271da177e4SLinus Torvalds 
382841ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
38291da177e4SLinus Torvalds {
383041ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
383141ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
38321da177e4SLinus Torvalds }
38331da177e4SLinus Torvalds 
38349a8ec03eSweiping zhang static void shmem_init_inodecache(void)
38351da177e4SLinus Torvalds {
38361da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
38371da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
38385d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
38391da177e4SLinus Torvalds }
38401da177e4SLinus Torvalds 
384141ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
38421da177e4SLinus Torvalds {
38431a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
38441da177e4SLinus Torvalds }
38451da177e4SLinus Torvalds 
3846a7605426SYang Shi /* Keep the page in page cache instead of truncating it */
3847a7605426SYang Shi static int shmem_error_remove_page(struct address_space *mapping,
3848a7605426SYang Shi 				   struct page *page)
3849a7605426SYang Shi {
3850a7605426SYang Shi 	return 0;
3851a7605426SYang Shi }
3852a7605426SYang Shi 
385330e6a51dSHui Su const struct address_space_operations shmem_aops = {
38541da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
385546de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
38561da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3857800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3858800d15a5SNick Piggin 	.write_end	= shmem_write_end,
38591da177e4SLinus Torvalds #endif
38601c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
386154184650SMatthew Wilcox (Oracle) 	.migrate_folio	= migrate_folio,
38621c93923cSAndrew Morton #endif
3863a7605426SYang Shi 	.error_remove_page = shmem_error_remove_page,
38641da177e4SLinus Torvalds };
386530e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
38661da177e4SLinus Torvalds 
386715ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
38681da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3869c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
38701da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3871220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
38722ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
38738174202bSAl Viro 	.write_iter	= generic_file_write_iter,
38741b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
387582c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3876f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
387783e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
38781da177e4SLinus Torvalds #endif
38791da177e4SLinus Torvalds };
38801da177e4SLinus Torvalds 
388192e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
388244a30220SYu Zhao 	.getattr	= shmem_getattr,
388394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3884b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3885b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3886feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3887e408e695STheodore Ts'o 	.fileattr_get	= shmem_fileattr_get,
3888e408e695STheodore Ts'o 	.fileattr_set	= shmem_fileattr_set,
3889b09e0fa4SEric Paris #endif
38901da177e4SLinus Torvalds };
38911da177e4SLinus Torvalds 
389292e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38931da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3894f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
38951da177e4SLinus Torvalds 	.create		= shmem_create,
38961da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38971da177e4SLinus Torvalds 	.link		= shmem_link,
38981da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
38991da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
39001da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
39011da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
39021da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
39032773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
390460545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
39051da177e4SLinus Torvalds #endif
3906b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3907b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3908e408e695STheodore Ts'o 	.fileattr_get	= shmem_fileattr_get,
3909e408e695STheodore Ts'o 	.fileattr_set	= shmem_fileattr_set,
3910b09e0fa4SEric Paris #endif
391139f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
391294c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3913feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
391439f0247dSAndreas Gruenbacher #endif
391539f0247dSAndreas Gruenbacher };
391639f0247dSAndreas Gruenbacher 
391792e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3918f7cd16a5SXavier Roche 	.getattr	= shmem_getattr,
3919b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3920b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3921b09e0fa4SEric Paris #endif
392239f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
392394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3924feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
392539f0247dSAndreas Gruenbacher #endif
39261da177e4SLinus Torvalds };
39271da177e4SLinus Torvalds 
3928759b9775SHugh Dickins static const struct super_operations shmem_ops = {
39291da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
393074b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
39311da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
39321da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
39331da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3934680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
39351da177e4SLinus Torvalds #endif
39361f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
39371da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
39381da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3939396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3940779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3941779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3942779750d2SKirill A. Shutemov #endif
39431da177e4SLinus Torvalds };
39441da177e4SLinus Torvalds 
3945f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
394654cb8821SNick Piggin 	.fault		= shmem_fault,
3947d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
39481da177e4SLinus Torvalds #ifdef CONFIG_NUMA
39491da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
39501da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
39511da177e4SLinus Torvalds #endif
39521da177e4SLinus Torvalds };
39531da177e4SLinus Torvalds 
3954f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
39551da177e4SLinus Torvalds {
3956f3235626SDavid Howells 	struct shmem_options *ctx;
3957f3235626SDavid Howells 
3958f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3959f3235626SDavid Howells 	if (!ctx)
3960f3235626SDavid Howells 		return -ENOMEM;
3961f3235626SDavid Howells 
3962f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3963f3235626SDavid Howells 	ctx->uid = current_fsuid();
3964f3235626SDavid Howells 	ctx->gid = current_fsgid();
3965f3235626SDavid Howells 
3966f3235626SDavid Howells 	fc->fs_private = ctx;
3967f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3968f3235626SDavid Howells 	return 0;
39691da177e4SLinus Torvalds }
39701da177e4SLinus Torvalds 
397141ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
39721da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
39731da177e4SLinus Torvalds 	.name		= "tmpfs",
3974f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3975f3235626SDavid Howells #ifdef CONFIG_TMPFS
3976d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3977f3235626SDavid Howells #endif
39781da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
3979ff36da69SMatthew Wilcox (Oracle) 	.fs_flags	= FS_USERNS_MOUNT,
39801da177e4SLinus Torvalds };
39811da177e4SLinus Torvalds 
39829096bbe9SMiaohe Lin void __init shmem_init(void)
39831da177e4SLinus Torvalds {
39841da177e4SLinus Torvalds 	int error;
39851da177e4SLinus Torvalds 
39869a8ec03eSweiping zhang 	shmem_init_inodecache();
39871da177e4SLinus Torvalds 
398841ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
39891da177e4SLinus Torvalds 	if (error) {
39901170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
39911da177e4SLinus Torvalds 		goto out2;
39921da177e4SLinus Torvalds 	}
399395dc112aSGreg Kroah-Hartman 
3994ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39951da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39961da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39971170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39981da177e4SLinus Torvalds 		goto out1;
39991da177e4SLinus Torvalds 	}
40005a6e75f8SKirill A. Shutemov 
4001396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4002435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
40035a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40045a6e75f8SKirill A. Shutemov 	else
40055e6e5a12SHugh Dickins 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
40065a6e75f8SKirill A. Shutemov #endif
40079096bbe9SMiaohe Lin 	return;
40081da177e4SLinus Torvalds 
40091da177e4SLinus Torvalds out1:
401041ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
40111da177e4SLinus Torvalds out2:
401241ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
40131da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
40141da177e4SLinus Torvalds }
4015853ac43aSMatt Mackall 
4016396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
40175a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
40185a6e75f8SKirill A. Shutemov 				  struct kobj_attribute *attr, char *buf)
40195a6e75f8SKirill A. Shutemov {
402026083eb6SColin Ian King 	static const int values[] = {
40215a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
40225a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
40235a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
40245a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
40255a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
40265a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
40275a6e75f8SKirill A. Shutemov 	};
402879d4d38aSJoe Perches 	int len = 0;
402979d4d38aSJoe Perches 	int i;
40305a6e75f8SKirill A. Shutemov 
403179d4d38aSJoe Perches 	for (i = 0; i < ARRAY_SIZE(values); i++) {
403279d4d38aSJoe Perches 		len += sysfs_emit_at(buf, len,
403379d4d38aSJoe Perches 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
403479d4d38aSJoe Perches 				     i ? " " : "",
40355a6e75f8SKirill A. Shutemov 				     shmem_format_huge(values[i]));
40365a6e75f8SKirill A. Shutemov 	}
403779d4d38aSJoe Perches 
403879d4d38aSJoe Perches 	len += sysfs_emit_at(buf, len, "\n");
403979d4d38aSJoe Perches 
404079d4d38aSJoe Perches 	return len;
40415a6e75f8SKirill A. Shutemov }
40425a6e75f8SKirill A. Shutemov 
40435a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
40445a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
40455a6e75f8SKirill A. Shutemov {
40465a6e75f8SKirill A. Shutemov 	char tmp[16];
40475a6e75f8SKirill A. Shutemov 	int huge;
40485a6e75f8SKirill A. Shutemov 
40495a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
40505a6e75f8SKirill A. Shutemov 		return -EINVAL;
40515a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
40525a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
40535a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
40545a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
40555a6e75f8SKirill A. Shutemov 
40565a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
40575a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
40585a6e75f8SKirill A. Shutemov 		return -EINVAL;
40595a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
40605a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
40615a6e75f8SKirill A. Shutemov 		return -EINVAL;
40625a6e75f8SKirill A. Shutemov 
40635a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
4064435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
40655a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40665a6e75f8SKirill A. Shutemov 	return count;
40675a6e75f8SKirill A. Shutemov }
40685a6e75f8SKirill A. Shutemov 
40694bfa8adaSMiaohe Lin struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4070396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
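/*
 * The knob above appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled. A minimal userspace
 * sketch (error handling elided):
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void set_shmem_thp(const char *mode)	/* e.g. "within_size" */
{
	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
		      O_WRONLY);

	if (fd >= 0) {
		write(fd, mode, strlen(mode));	/* shmem_enabled_store() */
		close(fd);
	}
}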
4071f3f0e1d2SKirill A. Shutemov 
4072853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4073853ac43aSMatt Mackall 
4074853ac43aSMatt Mackall /*
4075853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4076853ac43aSMatt Mackall  *
4077853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4078853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4079853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4080853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4081853ac43aSMatt Mackall  */
4082853ac43aSMatt Mackall 
408341ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4084853ac43aSMatt Mackall 	.name		= "tmpfs",
4085f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4086d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
4087853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40882b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4089853ac43aSMatt Mackall };
4090853ac43aSMatt Mackall 
40919096bbe9SMiaohe Lin void __init shmem_init(void)
4092853ac43aSMatt Mackall {
409341ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4094853ac43aSMatt Mackall 
409541ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4096853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4097853ac43aSMatt Mackall }
4098853ac43aSMatt Mackall 
409910a9c496SChristoph Hellwig int shmem_unuse(unsigned int type)
4100853ac43aSMatt Mackall {
4101853ac43aSMatt Mackall 	return 0;
4102853ac43aSMatt Mackall }
4103853ac43aSMatt Mackall 
4104d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
41053f96b79aSHugh Dickins {
41063f96b79aSHugh Dickins 	return 0;
41073f96b79aSHugh Dickins }
41083f96b79aSHugh Dickins 
410924513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
411024513264SHugh Dickins {
411124513264SHugh Dickins }
411224513264SHugh Dickins 
4113c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4114c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4115c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4116c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4117c01d5b30SHugh Dickins {
4118c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4119c01d5b30SHugh Dickins }
4120c01d5b30SHugh Dickins #endif
4121c01d5b30SHugh Dickins 
412241ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
412394c1e62dSHugh Dickins {
412441ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
412594c1e62dSHugh Dickins }
412694c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
412794c1e62dSHugh Dickins 
4128853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
41290b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4130454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
41310b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
41320b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4133853ac43aSMatt Mackall 
4134853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4135853ac43aSMatt Mackall 
4136853ac43aSMatt Mackall /* common code */
41371da177e4SLinus Torvalds 
4138703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4139c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
41401da177e4SLinus Torvalds {
41411da177e4SLinus Torvalds 	struct inode *inode;
414293dec2daSAl Viro 	struct file *res;
41431da177e4SLinus Torvalds 
4144703321b6SMatthew Auld 	if (IS_ERR(mnt))
4145703321b6SMatthew Auld 		return ERR_CAST(mnt);
41461da177e4SLinus Torvalds 
4147285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
41481da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
41491da177e4SLinus Torvalds 
41501da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41511da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41521da177e4SLinus Torvalds 
415393dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
415493dec2daSAl Viro 				flags);
4155dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4156dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4157dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4158dac2d1f6SAl Viro 	}
4159c7277090SEric Paris 	inode->i_flags |= i_flags;
41601da177e4SLinus Torvalds 	inode->i_size = size;
41616d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
416226567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
416393dec2daSAl Viro 	if (!IS_ERR(res))
416493dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
41654b42af81SAl Viro 				&shmem_file_operations);
41666b4d0b27SAl Viro 	if (IS_ERR(res))
416793dec2daSAl Viro 		iput(inode);
41686b4d0b27SAl Viro 	return res;
41691da177e4SLinus Torvalds }
4170c7277090SEric Paris 
4171c7277090SEric Paris /**
4172c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4173c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4174c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4175e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4176e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than at the inode level.
4177c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4178c7277090SEric Paris  * @size: size to be set for the file
4179c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4180c7277090SEric Paris  */
4181c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4182c7277090SEric Paris {
4183703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4184c7277090SEric Paris }
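
/*
 * Example (an illustrative sketch, not a caller in this file): a
 * hypothetical kernel-internal user creating an unlinked tmpfs file as
 * private backing store, in the style of big_key.  Because S_PRIVATE
 * bypasses the LSM inode hooks, the caller must police access itself.
 *
 *	struct file *file;
 *
 *	file = shmem_kernel_file_setup("example-blob", PAGE_SIZE, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... write via kernel_write(), read via kernel_read() ...
 *	fput(file);
 */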
4185c7277090SEric Paris 
4186c7277090SEric Paris /**
4187c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4188c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4189c7277090SEric Paris  * @size: size to be set for the file
4190c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4191c7277090SEric Paris  */
4192c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4193c7277090SEric Paris {
4194703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4195c7277090SEric Paris }
4196395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
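
/*
 * Example (an illustrative sketch with assumed names): create a 1MiB
 * unlinked tmpfs file.  VM_NORESERVE makes shmem_acct_size() skip the
 * up-front charge for the whole object, so pages are accounted lazily
 * as they are instantiated; "example-buf" is an arbitrary label that
 * only shows up in /proc/<pid>/maps.
 *
 *	struct file *file;
 *
 *	file = shmem_file_setup("example-buf", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... mmap or read/write the file ...
 *	fput(file);
 */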
41971da177e4SLinus Torvalds 
419846711810SRandy Dunlap /**
4199703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4200703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4201703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4202703321b6SMatthew Auld  * @size: size to be set for the file
4203703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4204703321b6SMatthew Auld  */
4205703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4206703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4207703321b6SMatthew Auld {
4208703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4209703321b6SMatthew Auld }
4210703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
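
/*
 * Example (an illustrative sketch): a driver that keeps its own private
 * tmpfs instance can create files there instead of on the global
 * shm_mnt, so that per-mount options such as "huge=" apply to them;
 * drm/i915 does this for its "gemfs" mount.  Here "my_mnt" is a
 * hypothetical vfsmount obtained by kern_mount()ing tmpfs.
 *
 *	file = shmem_file_setup_with_mnt(my_mnt, "example-object",
 *					 size, VM_NORESERVE);
 */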
4211703321b6SMatthew Auld 
4212703321b6SMatthew Auld /**
42131da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
421445e55300SPeter Collingbourne  * @vma: the vma to be mmapped, as prepared by do_mmap
42151da177e4SLinus Torvalds  */
42161da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
42171da177e4SLinus Torvalds {
42181da177e4SLinus Torvalds 	struct file *file;
42191da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
42201da177e4SLinus Torvalds 
422166fc1303SHugh Dickins 	/*
4222c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
422366fc1303SHugh Dickins 	 * between XFS directory reading and SELinux: since this file is only
422466fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
422566fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
422666fc1303SHugh Dickins 	 */
4227703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
42281da177e4SLinus Torvalds 	if (IS_ERR(file))
42291da177e4SLinus Torvalds 		return PTR_ERR(file);
42301da177e4SLinus Torvalds 
42311da177e4SLinus Torvalds 	if (vma->vm_file)
42321da177e4SLinus Torvalds 		fput(vma->vm_file);
42331da177e4SLinus Torvalds 	vma->vm_file = file;
42341da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4235f3f0e1d2SKirill A. Shutemov 
42361da177e4SLinus Torvalds 	return 0;
42371da177e4SLinus Torvalds }
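
/*
 * For context (a sketch of the usual path, not code in this file): a
 * shared anonymous mapping such as
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * arrives here from the do_mmap() path with the vma already prepared;
 * shmem_zero_setup() then attaches an unlinked tmpfs file as
 * vma->vm_file to provide zero-filled, swappable backing store.
 */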
4238d9d90e5eSHugh Dickins 
4239d9d90e5eSHugh Dickins /**
4240d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4241d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4242d9d90e5eSHugh Dickins  * @index:	the page index
4243d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4244d9d90e5eSHugh Dickins  *
4245d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4246d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
42477e0a1265SMatthew Wilcox (Oracle)  * But read_cache_page_gfp() uses the ->read_folio() method, which does not
4248d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4249d9d90e5eSHugh Dickins  * for itself; the i915 and ttm drivers in drivers/gpu/drm rely upon this support.
4250d9d90e5eSHugh Dickins  *
425168da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
425268da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4253d9d90e5eSHugh Dickins  */
4254d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4255d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4256d9d90e5eSHugh Dickins {
425768da9f05SHugh Dickins #ifdef CONFIG_SHMEM
425868da9f05SHugh Dickins 	struct inode *inode = mapping->host;
4259a3a9c397SMatthew Wilcox (Oracle) 	struct folio *folio;
42609276aad6SHugh Dickins 	struct page *page;
426168da9f05SHugh Dickins 	int error;
426268da9f05SHugh Dickins 
426330e6a51dSHui Su 	BUG_ON(!shmem_mapping(mapping));
4264a3a9c397SMatthew Wilcox (Oracle) 	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
4265cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
426668da9f05SHugh Dickins 	if (error)
4267a7605426SYang Shi 		return ERR_PTR(error);
4268a7605426SYang Shi 
4269a3a9c397SMatthew Wilcox (Oracle) 	folio_unlock(folio);
4270a3a9c397SMatthew Wilcox (Oracle) 	page = folio_file_page(folio, index);
4271a7605426SYang Shi 	if (PageHWPoison(page)) {
4272a3a9c397SMatthew Wilcox (Oracle) 		folio_put(folio);
4273a7605426SYang Shi 		return ERR_PTR(-EIO);
4274a7605426SYang Shi 	}
4275a7605426SYang Shi 
427668da9f05SHugh Dickins 	return page;
427768da9f05SHugh Dickins #else
427868da9f05SHugh Dickins 	/*
427968da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
428068da9f05SHugh Dickins 	 */
4281d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
428268da9f05SHugh Dickins #endif
4283d9d90e5eSHugh Dickins }
4284d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
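
/*
 * Example (an illustrative sketch modelled on the drm users mentioned
 * above; "obj_file" and "index" are assumed to be the caller's): read
 * one object page with relaxed reclaim flags, so that a failed
 * allocation is reported to the caller instead of OOMing the machine.
 *
 *	struct address_space *mapping = file_inode(obj_file)->i_mapping;
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page, then put_page(page) ...
 */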
4285