xref: /openbmc/linux/mm/shmem.c (revision 1e84a3d997b74c33491899e31d48774f252213ab)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Resizable virtual memory filesystem for Linux.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2000 Linus Torvalds.
51da177e4SLinus Torvalds  *		 2000 Transmeta Corp.
61da177e4SLinus Torvalds  *		 2000-2001 Christoph Rohland
71da177e4SLinus Torvalds  *		 2000-2001 SAP AG
81da177e4SLinus Torvalds  *		 2002 Red Hat Inc.
96922c0c7SHugh Dickins  * Copyright (C) 2002-2011 Hugh Dickins.
106922c0c7SHugh Dickins  * Copyright (C) 2011 Google Inc.
110edd73b3SHugh Dickins  * Copyright (C) 2002-2005 VERITAS Software Corporation.
121da177e4SLinus Torvalds  * Copyright (C) 2004 Andi Kleen, SuSE Labs
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Extended attribute support for tmpfs:
151da177e4SLinus Torvalds  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
161da177e4SLinus Torvalds  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
171da177e4SLinus Torvalds  *
18853ac43aSMatt Mackall  * tiny-shmem:
19853ac43aSMatt Mackall  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20853ac43aSMatt Mackall  *
211da177e4SLinus Torvalds  * This file is released under the GPL.
221da177e4SLinus Torvalds  */
231da177e4SLinus Torvalds 
24853ac43aSMatt Mackall #include <linux/fs.h>
25853ac43aSMatt Mackall #include <linux/init.h>
26853ac43aSMatt Mackall #include <linux/vfs.h>
27853ac43aSMatt Mackall #include <linux/mount.h>
28250297edSAndrew Morton #include <linux/ramfs.h>
29caefba17SHugh Dickins #include <linux/pagemap.h>
30853ac43aSMatt Mackall #include <linux/file.h>
31853ac43aSMatt Mackall #include <linux/mm.h>
3246c9a946SArnd Bergmann #include <linux/random.h>
33174cd4b1SIngo Molnar #include <linux/sched/signal.h>
34b95f1b31SPaul Gortmaker #include <linux/export.h>
35853ac43aSMatt Mackall #include <linux/swap.h>
36e2e40f2cSChristoph Hellwig #include <linux/uio.h>
37f3f0e1d2SKirill A. Shutemov #include <linux/khugepaged.h>
38749df87bSMike Kravetz #include <linux/hugetlb.h>
39b56a2d8aSVineeth Remanan Pillai #include <linux/frontswap.h>
40626c3920SAl Viro #include <linux/fs_parser.h>
4186a2f3f2SMiaohe Lin #include <linux/swapfile.h>
4295cc09d6SAndrea Arcangeli 
43853ac43aSMatt Mackall static struct vfsmount *shm_mnt;
44853ac43aSMatt Mackall 
45853ac43aSMatt Mackall #ifdef CONFIG_SHMEM
461da177e4SLinus Torvalds /*
471da177e4SLinus Torvalds  * This virtual memory filesystem is heavily based on the ramfs. It
481da177e4SLinus Torvalds  * extends ramfs by the ability to use swap and honor resource limits
491da177e4SLinus Torvalds  * which makes it a completely usable filesystem.
501da177e4SLinus Torvalds  */
511da177e4SLinus Torvalds 
5239f0247dSAndreas Gruenbacher #include <linux/xattr.h>
53a5694255SChristoph Hellwig #include <linux/exportfs.h>
541c7c474cSChristoph Hellwig #include <linux/posix_acl.h>
55feda821eSChristoph Hellwig #include <linux/posix_acl_xattr.h>
561da177e4SLinus Torvalds #include <linux/mman.h>
571da177e4SLinus Torvalds #include <linux/string.h>
581da177e4SLinus Torvalds #include <linux/slab.h>
591da177e4SLinus Torvalds #include <linux/backing-dev.h>
601da177e4SLinus Torvalds #include <linux/shmem_fs.h>
611da177e4SLinus Torvalds #include <linux/writeback.h>
62bda97eabSHugh Dickins #include <linux/pagevec.h>
6341ffe5d5SHugh Dickins #include <linux/percpu_counter.h>
6483e4fa9cSHugh Dickins #include <linux/falloc.h>
65708e3508SHugh Dickins #include <linux/splice.h>
661da177e4SLinus Torvalds #include <linux/security.h>
671da177e4SLinus Torvalds #include <linux/swapops.h>
681da177e4SLinus Torvalds #include <linux/mempolicy.h>
691da177e4SLinus Torvalds #include <linux/namei.h>
70b00dc3adSHugh Dickins #include <linux/ctype.h>
71304dbdb7SLee Schermerhorn #include <linux/migrate.h>
72c1f60a5aSChristoph Lameter #include <linux/highmem.h>
73680d794bSakpm@linux-foundation.org #include <linux/seq_file.h>
7492562927SMimi Zohar #include <linux/magic.h>
759183df25SDavid Herrmann #include <linux/syscalls.h>
7640e041a2SDavid Herrmann #include <linux/fcntl.h>
779183df25SDavid Herrmann #include <uapi/linux/memfd.h>
78cfda0526SMike Rapoport #include <linux/userfaultfd_k.h>
794c27fe4cSMike Rapoport #include <linux/rmap.h>
802b4db796SAmir Goldstein #include <linux/uuid.h>
81304dbdb7SLee Schermerhorn 
827c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
831da177e4SLinus Torvalds 
84dd56b046SMel Gorman #include "internal.h"
85dd56b046SMel Gorman 
8609cbfeafSKirill A. Shutemov #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
8709cbfeafSKirill A. Shutemov #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds /* Pretend that each entry is of this size in directory's i_size */
901da177e4SLinus Torvalds #define BOGO_DIRENT_SIZE 20
911da177e4SLinus Torvalds 
9269f07ec9SHugh Dickins /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
9369f07ec9SHugh Dickins #define SHORT_SYMLINK_LEN 128
9469f07ec9SHugh Dickins 
951aac1400SHugh Dickins /*
96f00cdc6dSHugh Dickins  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
979608703eSJan Kara  * inode->i_private (with i_rwsem making sure that it has only one user at
98f00cdc6dSHugh Dickins  * a time): we would prefer not to enlarge the shmem inode just for that.
991aac1400SHugh Dickins  */
1001aac1400SHugh Dickins struct shmem_falloc {
1018e205f77SHugh Dickins 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
1021aac1400SHugh Dickins 	pgoff_t start;		/* start of range currently being fallocated */
1031aac1400SHugh Dickins 	pgoff_t next;		/* the next page offset to be fallocated */
1041aac1400SHugh Dickins 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
1051aac1400SHugh Dickins 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
1061aac1400SHugh Dickins };
1071aac1400SHugh Dickins 
1080b5071ddSAl Viro struct shmem_options {
1090b5071ddSAl Viro 	unsigned long long blocks;
1100b5071ddSAl Viro 	unsigned long long inodes;
1110b5071ddSAl Viro 	struct mempolicy *mpol;
1120b5071ddSAl Viro 	kuid_t uid;
1130b5071ddSAl Viro 	kgid_t gid;
1140b5071ddSAl Viro 	umode_t mode;
115ea3271f7SChris Down 	bool full_inums;
1160b5071ddSAl Viro 	int huge;
1170b5071ddSAl Viro 	int seen;
1180b5071ddSAl Viro #define SHMEM_SEEN_BLOCKS 1
1190b5071ddSAl Viro #define SHMEM_SEEN_INODES 2
1200b5071ddSAl Viro #define SHMEM_SEEN_HUGE 4
121ea3271f7SChris Down #define SHMEM_SEEN_INUMS 8
1220b5071ddSAl Viro };
1230b5071ddSAl Viro 
124b76db735SAndrew Morton #ifdef CONFIG_TMPFS
125680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_blocks(void)
126680d794bSakpm@linux-foundation.org {
127ca79b0c2SArun KS 	return totalram_pages() / 2;
128680d794bSakpm@linux-foundation.org }
129680d794bSakpm@linux-foundation.org 
130680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_inodes(void)
131680d794bSakpm@linux-foundation.org {
132ca79b0c2SArun KS 	unsigned long nr_pages = totalram_pages();
133ca79b0c2SArun KS 
134ca79b0c2SArun KS 	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
135680d794bSakpm@linux-foundation.org }
136b76db735SAndrew Morton #endif
137680d794bSakpm@linux-foundation.org 
138c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
139c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
140c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
141c5bf121eSVineeth Remanan Pillai 			     vm_fault_t *fault_type);
14268da9f05SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1439e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp,
144cfda0526SMike Rapoport 		gfp_t gfp, struct vm_area_struct *vma,
1452b740303SSouptick Joarder 		struct vm_fault *vmf, vm_fault_t *fault_type);
14668da9f05SHugh Dickins 
147f3f0e1d2SKirill A. Shutemov int shmem_getpage(struct inode *inode, pgoff_t index,
1489e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp)
14968da9f05SHugh Dickins {
15068da9f05SHugh Dickins 	return shmem_getpage_gfp(inode, index, pagep, sgp,
151cfda0526SMike Rapoport 		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
15268da9f05SHugh Dickins }
1531da177e4SLinus Torvalds 
1541da177e4SLinus Torvalds static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
1551da177e4SLinus Torvalds {
1561da177e4SLinus Torvalds 	return sb->s_fs_info;
1571da177e4SLinus Torvalds }
1581da177e4SLinus Torvalds 
1591da177e4SLinus Torvalds /*
1601da177e4SLinus Torvalds  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
1611da177e4SLinus Torvalds  * for shared memory and for shared anonymous (/dev/zero) mappings
1621da177e4SLinus Torvalds  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
1631da177e4SLinus Torvalds  * consistent with the pre-accounting of private mappings ...
1641da177e4SLinus Torvalds  */
1651da177e4SLinus Torvalds static inline int shmem_acct_size(unsigned long flags, loff_t size)
1661da177e4SLinus Torvalds {
1670b0a0806SHugh Dickins 	return (flags & VM_NORESERVE) ?
168191c5424SAl Viro 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
1691da177e4SLinus Torvalds }
1701da177e4SLinus Torvalds 
1711da177e4SLinus Torvalds static inline void shmem_unacct_size(unsigned long flags, loff_t size)
1721da177e4SLinus Torvalds {
1730b0a0806SHugh Dickins 	if (!(flags & VM_NORESERVE))
1741da177e4SLinus Torvalds 		vm_unacct_memory(VM_ACCT(size));
1751da177e4SLinus Torvalds }
1761da177e4SLinus Torvalds 
17777142517SKonstantin Khlebnikov static inline int shmem_reacct_size(unsigned long flags,
17877142517SKonstantin Khlebnikov 		loff_t oldsize, loff_t newsize)
17977142517SKonstantin Khlebnikov {
18077142517SKonstantin Khlebnikov 	if (!(flags & VM_NORESERVE)) {
18177142517SKonstantin Khlebnikov 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
18277142517SKonstantin Khlebnikov 			return security_vm_enough_memory_mm(current->mm,
18377142517SKonstantin Khlebnikov 					VM_ACCT(newsize) - VM_ACCT(oldsize));
18477142517SKonstantin Khlebnikov 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
18577142517SKonstantin Khlebnikov 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
18677142517SKonstantin Khlebnikov 	}
18777142517SKonstantin Khlebnikov 	return 0;
18877142517SKonstantin Khlebnikov }
18977142517SKonstantin Khlebnikov 
1901da177e4SLinus Torvalds /*
1911da177e4SLinus Torvalds  * ... whereas tmpfs objects are accounted incrementally as
19275edd345SHugh Dickins  * pages are allocated, in order to allow large sparse files.
1931da177e4SLinus Torvalds  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
1941da177e4SLinus Torvalds  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
1951da177e4SLinus Torvalds  */
196800d8c63SKirill A. Shutemov static inline int shmem_acct_block(unsigned long flags, long pages)
1971da177e4SLinus Torvalds {
198800d8c63SKirill A. Shutemov 	if (!(flags & VM_NORESERVE))
199800d8c63SKirill A. Shutemov 		return 0;
200800d8c63SKirill A. Shutemov 
201800d8c63SKirill A. Shutemov 	return security_vm_enough_memory_mm(current->mm,
202800d8c63SKirill A. Shutemov 			pages * VM_ACCT(PAGE_SIZE));
2031da177e4SLinus Torvalds }
2041da177e4SLinus Torvalds 
2051da177e4SLinus Torvalds static inline void shmem_unacct_blocks(unsigned long flags, long pages)
2061da177e4SLinus Torvalds {
2070b0a0806SHugh Dickins 	if (flags & VM_NORESERVE)
20809cbfeafSKirill A. Shutemov 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
2091da177e4SLinus Torvalds }
2101da177e4SLinus Torvalds 
2110f079694SMike Rapoport static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
2120f079694SMike Rapoport {
2130f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2140f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2150f079694SMike Rapoport 
2160f079694SMike Rapoport 	if (shmem_acct_block(info->flags, pages))
2170f079694SMike Rapoport 		return false;
2180f079694SMike Rapoport 
2190f079694SMike Rapoport 	if (sbinfo->max_blocks) {
2200f079694SMike Rapoport 		if (percpu_counter_compare(&sbinfo->used_blocks,
2210f079694SMike Rapoport 					   sbinfo->max_blocks - pages) > 0)
2220f079694SMike Rapoport 			goto unacct;
2230f079694SMike Rapoport 		percpu_counter_add(&sbinfo->used_blocks, pages);
2240f079694SMike Rapoport 	}
2250f079694SMike Rapoport 
2260f079694SMike Rapoport 	return true;
2270f079694SMike Rapoport 
2280f079694SMike Rapoport unacct:
2290f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2300f079694SMike Rapoport 	return false;
2310f079694SMike Rapoport }
2320f079694SMike Rapoport 
2330f079694SMike Rapoport static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
2340f079694SMike Rapoport {
2350f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2360f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2370f079694SMike Rapoport 
2380f079694SMike Rapoport 	if (sbinfo->max_blocks)
2390f079694SMike Rapoport 		percpu_counter_sub(&sbinfo->used_blocks, pages);
2400f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2410f079694SMike Rapoport }
2420f079694SMike Rapoport 
243759b9775SHugh Dickins static const struct super_operations shmem_ops;
24430e6a51dSHui Su const struct address_space_operations shmem_aops;
24515ad7cdcSHelge Deller static const struct file_operations shmem_file_operations;
24692e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations;
24792e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations;
24892e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations;
249f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops;
250779750d2SKirill A. Shutemov static struct file_system_type shmem_fs_type;
2511da177e4SLinus Torvalds 
252b0506e48SMike Rapoport bool vma_is_shmem(struct vm_area_struct *vma)
253b0506e48SMike Rapoport {
254b0506e48SMike Rapoport 	return vma->vm_ops == &shmem_vm_ops;
255b0506e48SMike Rapoport }
256b0506e48SMike Rapoport 
2571da177e4SLinus Torvalds static LIST_HEAD(shmem_swaplist);
258cb5f7b9aSHugh Dickins static DEFINE_MUTEX(shmem_swaplist_mutex);
2591da177e4SLinus Torvalds 
260e809d5f0SChris Down /*
261e809d5f0SChris Down  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
262e809d5f0SChris Down  * produces a novel ino for the newly allocated inode.
263e809d5f0SChris Down  *
264e809d5f0SChris Down  * It may also be called when making a hard link to permit the space needed by
265e809d5f0SChris Down  * each dentry. However, in that case, no new inode number is needed since that
266e809d5f0SChris Down  * internally draws from another pool of inode numbers (currently global
267e809d5f0SChris Down  * get_next_ino()). This case is indicated by passing NULL as inop.
268e809d5f0SChris Down  */
269e809d5f0SChris Down #define SHMEM_INO_BATCH 1024
270e809d5f0SChris Down static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
2715b04c689SPavel Emelyanov {
2725b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
273e809d5f0SChris Down 	ino_t ino;
274e809d5f0SChris Down 
275e809d5f0SChris Down 	if (!(sb->s_flags & SB_KERNMOUNT)) {
276bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);
277bb3e96d6SByron Stanoszek 		if (sbinfo->max_inodes) {
2785b04c689SPavel Emelyanov 			if (!sbinfo->free_inodes) {
279bf11b9a8SSebastian Andrzej Siewior 				raw_spin_unlock(&sbinfo->stat_lock);
2805b04c689SPavel Emelyanov 				return -ENOSPC;
2815b04c689SPavel Emelyanov 			}
2825b04c689SPavel Emelyanov 			sbinfo->free_inodes--;
283bb3e96d6SByron Stanoszek 		}
284e809d5f0SChris Down 		if (inop) {
285e809d5f0SChris Down 			ino = sbinfo->next_ino++;
286e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
287e809d5f0SChris Down 				ino = sbinfo->next_ino++;
288ea3271f7SChris Down 			if (unlikely(!sbinfo->full_inums &&
289ea3271f7SChris Down 				     ino > UINT_MAX)) {
290e809d5f0SChris Down 				/*
291e809d5f0SChris Down 				 * Emulate get_next_ino uint wraparound for
292e809d5f0SChris Down 				 * compatibility
293e809d5f0SChris Down 				 */
294ea3271f7SChris Down 				if (IS_ENABLED(CONFIG_64BIT))
295ea3271f7SChris Down 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
296ea3271f7SChris Down 						__func__, MINOR(sb->s_dev));
297ea3271f7SChris Down 				sbinfo->next_ino = 1;
298ea3271f7SChris Down 				ino = sbinfo->next_ino++;
2995b04c689SPavel Emelyanov 			}
300e809d5f0SChris Down 			*inop = ino;
301e809d5f0SChris Down 		}
302bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
303e809d5f0SChris Down 	} else if (inop) {
304e809d5f0SChris Down 		/*
305e809d5f0SChris Down 		 * __shmem_file_setup, one of our callers, is lock-free: it
306e809d5f0SChris Down 		 * doesn't hold stat_lock in shmem_reserve_inode since
307e809d5f0SChris Down 		 * max_inodes is always 0, and is called from potentially
308e809d5f0SChris Down 		 * unknown contexts. As such, use a per-cpu batched allocator
309e809d5f0SChris Down 		 * which doesn't require the per-sb stat_lock unless we are at
310e809d5f0SChris Down 		 * the batch boundary.
311ea3271f7SChris Down 		 *
312ea3271f7SChris Down 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
313ea3271f7SChris Down 		 * shmem mounts are not exposed to userspace, so we don't need
314ea3271f7SChris Down 		 * to worry about things like glibc compatibility.
315e809d5f0SChris Down 		 */
316e809d5f0SChris Down 		ino_t *next_ino;
317bf11b9a8SSebastian Andrzej Siewior 
318e809d5f0SChris Down 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
319e809d5f0SChris Down 		ino = *next_ino;
320e809d5f0SChris Down 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
321bf11b9a8SSebastian Andrzej Siewior 			raw_spin_lock(&sbinfo->stat_lock);
322e809d5f0SChris Down 			ino = sbinfo->next_ino;
323e809d5f0SChris Down 			sbinfo->next_ino += SHMEM_INO_BATCH;
324bf11b9a8SSebastian Andrzej Siewior 			raw_spin_unlock(&sbinfo->stat_lock);
325e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
326e809d5f0SChris Down 				ino++;
327e809d5f0SChris Down 		}
328e809d5f0SChris Down 		*inop = ino;
329e809d5f0SChris Down 		*next_ino = ++ino;
330e809d5f0SChris Down 		put_cpu();
331e809d5f0SChris Down 	}
332e809d5f0SChris Down 
3335b04c689SPavel Emelyanov 	return 0;
3345b04c689SPavel Emelyanov }
3355b04c689SPavel Emelyanov 
3365b04c689SPavel Emelyanov static void shmem_free_inode(struct super_block *sb)
3375b04c689SPavel Emelyanov {
3385b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3395b04c689SPavel Emelyanov 	if (sbinfo->max_inodes) {
340bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);
3415b04c689SPavel Emelyanov 		sbinfo->free_inodes++;
342bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
3435b04c689SPavel Emelyanov 	}
3445b04c689SPavel Emelyanov }
3455b04c689SPavel Emelyanov 
34646711810SRandy Dunlap /**
34741ffe5d5SHugh Dickins  * shmem_recalc_inode - recalculate the block usage of an inode
3481da177e4SLinus Torvalds  * @inode: inode to recalc
3491da177e4SLinus Torvalds  *
3501da177e4SLinus Torvalds  * We have to calculate the free blocks since the mm can drop
3511da177e4SLinus Torvalds  * undirtied hole pages behind our back.
3521da177e4SLinus Torvalds  *
3531da177e4SLinus Torvalds  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
3541da177e4SLinus Torvalds  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
3551da177e4SLinus Torvalds  *
3561da177e4SLinus Torvalds  * It has to be called with the spinlock held.
3571da177e4SLinus Torvalds  */
3581da177e4SLinus Torvalds static void shmem_recalc_inode(struct inode *inode)
3591da177e4SLinus Torvalds {
3601da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
3611da177e4SLinus Torvalds 	long freed;
3621da177e4SLinus Torvalds 
3631da177e4SLinus Torvalds 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
3641da177e4SLinus Torvalds 	if (freed > 0) {
3651da177e4SLinus Torvalds 		info->alloced -= freed;
36654af6042SHugh Dickins 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
3670f079694SMike Rapoport 		shmem_inode_unacct_blocks(inode, freed);
3681da177e4SLinus Torvalds 	}
3691da177e4SLinus Torvalds }
3701da177e4SLinus Torvalds 
371800d8c63SKirill A. Shutemov bool shmem_charge(struct inode *inode, long pages)
372800d8c63SKirill A. Shutemov {
373800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3744595ef88SKirill A. Shutemov 	unsigned long flags;
375800d8c63SKirill A. Shutemov 
3760f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, pages))
377800d8c63SKirill A. Shutemov 		return false;
378b1cc94abSMike Rapoport 
379aaa52e34SHugh Dickins 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
380aaa52e34SHugh Dickins 	inode->i_mapping->nrpages += pages;
381aaa52e34SHugh Dickins 
3824595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
383800d8c63SKirill A. Shutemov 	info->alloced += pages;
384800d8c63SKirill A. Shutemov 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
385800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
3864595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
387800d8c63SKirill A. Shutemov 
388800d8c63SKirill A. Shutemov 	return true;
389800d8c63SKirill A. Shutemov }
390800d8c63SKirill A. Shutemov 
391800d8c63SKirill A. Shutemov void shmem_uncharge(struct inode *inode, long pages)
392800d8c63SKirill A. Shutemov {
393800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3944595ef88SKirill A. Shutemov 	unsigned long flags;
395800d8c63SKirill A. Shutemov 
396aaa52e34SHugh Dickins 	/* nrpages adjustment done by __delete_from_page_cache() or caller */
397aaa52e34SHugh Dickins 
3984595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
399800d8c63SKirill A. Shutemov 	info->alloced -= pages;
400800d8c63SKirill A. Shutemov 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
401800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
4024595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
403800d8c63SKirill A. Shutemov 
4040f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, pages);
405800d8c63SKirill A. Shutemov }
406800d8c63SKirill A. Shutemov 
4077a5d0fbbSHugh Dickins /*
40862f945b6SMatthew Wilcox  * Replace item expected in xarray by a new item, while holding xa_lock.
4097a5d0fbbSHugh Dickins  */
41062f945b6SMatthew Wilcox static int shmem_replace_entry(struct address_space *mapping,
4117a5d0fbbSHugh Dickins 			pgoff_t index, void *expected, void *replacement)
4127a5d0fbbSHugh Dickins {
41362f945b6SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, index);
4146dbaf22cSJohannes Weiner 	void *item;
4157a5d0fbbSHugh Dickins 
4167a5d0fbbSHugh Dickins 	VM_BUG_ON(!expected);
4176dbaf22cSJohannes Weiner 	VM_BUG_ON(!replacement);
41862f945b6SMatthew Wilcox 	item = xas_load(&xas);
4197a5d0fbbSHugh Dickins 	if (item != expected)
4207a5d0fbbSHugh Dickins 		return -ENOENT;
42162f945b6SMatthew Wilcox 	xas_store(&xas, replacement);
4227a5d0fbbSHugh Dickins 	return 0;
4237a5d0fbbSHugh Dickins }
4247a5d0fbbSHugh Dickins 
4257a5d0fbbSHugh Dickins /*
426d1899228SHugh Dickins  * Sometimes, before we decide whether to proceed or to fail, we must check
427d1899228SHugh Dickins  * that an entry was not already brought back from swap by a racing thread.
428d1899228SHugh Dickins  *
429d1899228SHugh Dickins  * Checking page is not enough: by the time a SwapCache page is locked, it
430d1899228SHugh Dickins  * might be reused, and again be SwapCache, using the same swap as before.
431d1899228SHugh Dickins  */
432d1899228SHugh Dickins static bool shmem_confirm_swap(struct address_space *mapping,
433d1899228SHugh Dickins 			       pgoff_t index, swp_entry_t swap)
434d1899228SHugh Dickins {
435a12831bfSMatthew Wilcox 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
436d1899228SHugh Dickins }
437d1899228SHugh Dickins 
438d1899228SHugh Dickins /*
4395a6e75f8SKirill A. Shutemov  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
4405a6e75f8SKirill A. Shutemov  *
4415a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_NEVER:
4425a6e75f8SKirill A. Shutemov  *	disables huge pages for the mount;
4435a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ALWAYS:
4445a6e75f8SKirill A. Shutemov  *	enables huge pages for the mount;
4455a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_WITHIN_SIZE:
4465a6e75f8SKirill A. Shutemov  *	only allocate huge pages if the page will be fully within i_size,
4475a6e75f8SKirill A. Shutemov  *	also respect fadvise()/madvise() hints;
4485a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ADVISE:
4495a6e75f8SKirill A. Shutemov  *	only allocate huge pages if requested with fadvise()/madvise();
4505a6e75f8SKirill A. Shutemov  */
4515a6e75f8SKirill A. Shutemov 
4525a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_NEVER	0
4535a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ALWAYS	1
4545a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_WITHIN_SIZE	2
4555a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ADVISE	3
4565a6e75f8SKirill A. Shutemov 
4575a6e75f8SKirill A. Shutemov /*
4585a6e75f8SKirill A. Shutemov  * Special values.
4595a6e75f8SKirill A. Shutemov  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
4605a6e75f8SKirill A. Shutemov  *
4615a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_DENY:
4625a6e75f8SKirill A. Shutemov  *	disables huge on shm_mnt and all mounts, for emergency use;
4635a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_FORCE:
4645a6e75f8SKirill A. Shutemov  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
4655a6e75f8SKirill A. Shutemov  *
4665a6e75f8SKirill A. Shutemov  */
4675a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_DENY		(-1)
4685a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_FORCE	(-2)
4695a6e75f8SKirill A. Shutemov 
470396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4715a6e75f8SKirill A. Shutemov /* ifdef here to avoid bloating shmem.o when not necessary */
4725a6e75f8SKirill A. Shutemov 
4735e6e5a12SHugh Dickins static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
4745a6e75f8SKirill A. Shutemov 
4755e6e5a12SHugh Dickins bool shmem_is_huge(struct vm_area_struct *vma,
4765e6e5a12SHugh Dickins 		   struct inode *inode, pgoff_t index)
477c852023eSHugh Dickins {
478c852023eSHugh Dickins 	loff_t i_size;
479c852023eSHugh Dickins 
4805e6e5a12SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
4815e6e5a12SHugh Dickins 		return false;
4825e6e5a12SHugh Dickins 	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
4835e6e5a12SHugh Dickins 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
484c852023eSHugh Dickins 		return false;
485c852023eSHugh Dickins 	if (shmem_huge == SHMEM_HUGE_FORCE)
486c852023eSHugh Dickins 		return true;
4875e6e5a12SHugh Dickins 
4885e6e5a12SHugh Dickins 	switch (SHMEM_SB(inode->i_sb)->huge) {
489c852023eSHugh Dickins 	case SHMEM_HUGE_ALWAYS:
490c852023eSHugh Dickins 		return true;
491c852023eSHugh Dickins 	case SHMEM_HUGE_WITHIN_SIZE:
492de6ee659SLiu Yuntao 		index = round_up(index + 1, HPAGE_PMD_NR);
493c852023eSHugh Dickins 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
494de6ee659SLiu Yuntao 		if (i_size >> PAGE_SHIFT >= index)
495c852023eSHugh Dickins 			return true;
496c852023eSHugh Dickins 		fallthrough;
497c852023eSHugh Dickins 	case SHMEM_HUGE_ADVISE:
4985e6e5a12SHugh Dickins 		if (vma && (vma->vm_flags & VM_HUGEPAGE))
4995e6e5a12SHugh Dickins 			return true;
5005e6e5a12SHugh Dickins 		fallthrough;
501c852023eSHugh Dickins 	default:
502c852023eSHugh Dickins 		return false;
503c852023eSHugh Dickins 	}
504c852023eSHugh Dickins }
5055a6e75f8SKirill A. Shutemov 
506e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS)
5075a6e75f8SKirill A. Shutemov static int shmem_parse_huge(const char *str)
5085a6e75f8SKirill A. Shutemov {
5095a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "never"))
5105a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_NEVER;
5115a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "always"))
5125a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ALWAYS;
5135a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "within_size"))
5145a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_WITHIN_SIZE;
5155a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "advise"))
5165a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ADVISE;
5175a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "deny"))
5185a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_DENY;
5195a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "force"))
5205a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_FORCE;
5215a6e75f8SKirill A. Shutemov 	return -EINVAL;
5225a6e75f8SKirill A. Shutemov }
523e5f2249aSArnd Bergmann #endif
5245a6e75f8SKirill A. Shutemov 
525e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
5265a6e75f8SKirill A. Shutemov static const char *shmem_format_huge(int huge)
5275a6e75f8SKirill A. Shutemov {
5285a6e75f8SKirill A. Shutemov 	switch (huge) {
5295a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
5305a6e75f8SKirill A. Shutemov 		return "never";
5315a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ALWAYS:
5325a6e75f8SKirill A. Shutemov 		return "always";
5335a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_WITHIN_SIZE:
5345a6e75f8SKirill A. Shutemov 		return "within_size";
5355a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
5365a6e75f8SKirill A. Shutemov 		return "advise";
5375a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_DENY:
5385a6e75f8SKirill A. Shutemov 		return "deny";
5395a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_FORCE:
5405a6e75f8SKirill A. Shutemov 		return "force";
5415a6e75f8SKirill A. Shutemov 	default:
5425a6e75f8SKirill A. Shutemov 		VM_BUG_ON(1);
5435a6e75f8SKirill A. Shutemov 		return "bad_val";
5445a6e75f8SKirill A. Shutemov 	}
5455a6e75f8SKirill A. Shutemov }
546f1f5929cSJérémy Lefaure #endif
5475a6e75f8SKirill A. Shutemov 
548779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
549779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
550779750d2SKirill A. Shutemov {
551779750d2SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
552253fd0f0SKirill A. Shutemov 	LIST_HEAD(to_remove);
553779750d2SKirill A. Shutemov 	struct inode *inode;
554779750d2SKirill A. Shutemov 	struct shmem_inode_info *info;
555779750d2SKirill A. Shutemov 	struct page *page;
556779750d2SKirill A. Shutemov 	unsigned long batch = sc ? sc->nr_to_scan : 128;
557779750d2SKirill A. Shutemov 	int removed = 0, split = 0;
558779750d2SKirill A. Shutemov 
559779750d2SKirill A. Shutemov 	if (list_empty(&sbinfo->shrinklist))
560779750d2SKirill A. Shutemov 		return SHRINK_STOP;
561779750d2SKirill A. Shutemov 
562779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
563779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
564779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
565779750d2SKirill A. Shutemov 
566779750d2SKirill A. Shutemov 		/* pin the inode */
567779750d2SKirill A. Shutemov 		inode = igrab(&info->vfs_inode);
568779750d2SKirill A. Shutemov 
569779750d2SKirill A. Shutemov 		/* inode is about to be evicted */
570779750d2SKirill A. Shutemov 		if (!inode) {
571779750d2SKirill A. Shutemov 			list_del_init(&info->shrinklist);
572779750d2SKirill A. Shutemov 			removed++;
573779750d2SKirill A. Shutemov 			goto next;
574779750d2SKirill A. Shutemov 		}
575779750d2SKirill A. Shutemov 
576779750d2SKirill A. Shutemov 		/* Check if there's anything to gain */
577779750d2SKirill A. Shutemov 		if (round_up(inode->i_size, PAGE_SIZE) ==
578779750d2SKirill A. Shutemov 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
579253fd0f0SKirill A. Shutemov 			list_move(&info->shrinklist, &to_remove);
580779750d2SKirill A. Shutemov 			removed++;
581779750d2SKirill A. Shutemov 			goto next;
582779750d2SKirill A. Shutemov 		}
583779750d2SKirill A. Shutemov 
584779750d2SKirill A. Shutemov 		list_move(&info->shrinklist, &list);
585779750d2SKirill A. Shutemov next:
586779750d2SKirill A. Shutemov 		if (!--batch)
587779750d2SKirill A. Shutemov 			break;
588779750d2SKirill A. Shutemov 	}
589779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
590779750d2SKirill A. Shutemov 
591253fd0f0SKirill A. Shutemov 	list_for_each_safe(pos, next, &to_remove) {
592253fd0f0SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
593253fd0f0SKirill A. Shutemov 		inode = &info->vfs_inode;
594253fd0f0SKirill A. Shutemov 		list_del_init(&info->shrinklist);
595253fd0f0SKirill A. Shutemov 		iput(inode);
596253fd0f0SKirill A. Shutemov 	}
597253fd0f0SKirill A. Shutemov 
598779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
599779750d2SKirill A. Shutemov 		int ret;
600779750d2SKirill A. Shutemov 
601779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
602779750d2SKirill A. Shutemov 		inode = &info->vfs_inode;
603779750d2SKirill A. Shutemov 
604b3cd54b2SKirill A. Shutemov 		if (nr_to_split && split >= nr_to_split)
605b3cd54b2SKirill A. Shutemov 			goto leave;
606779750d2SKirill A. Shutemov 
607b3cd54b2SKirill A. Shutemov 		page = find_get_page(inode->i_mapping,
608779750d2SKirill A. Shutemov 				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
609779750d2SKirill A. Shutemov 		if (!page)
610779750d2SKirill A. Shutemov 			goto drop;
611779750d2SKirill A. Shutemov 
612b3cd54b2SKirill A. Shutemov 		/* No huge page at the end of the file: nothing to split */
613779750d2SKirill A. Shutemov 		if (!PageTransHuge(page)) {
614779750d2SKirill A. Shutemov 			put_page(page);
615779750d2SKirill A. Shutemov 			goto drop;
616779750d2SKirill A. Shutemov 		}
617779750d2SKirill A. Shutemov 
618b3cd54b2SKirill A. Shutemov 		/*
619b3cd54b2SKirill A. Shutemov 		 * Leave the inode on the list if we failed to lock
620b3cd54b2SKirill A. Shutemov 		 * the page at this time.
621b3cd54b2SKirill A. Shutemov 		 *
622b3cd54b2SKirill A. Shutemov 		 * Waiting for the lock may lead to deadlock in the
623b3cd54b2SKirill A. Shutemov 		 * reclaim path.
624b3cd54b2SKirill A. Shutemov 		 */
625b3cd54b2SKirill A. Shutemov 		if (!trylock_page(page)) {
626b3cd54b2SKirill A. Shutemov 			put_page(page);
627b3cd54b2SKirill A. Shutemov 			goto leave;
628b3cd54b2SKirill A. Shutemov 		}
629b3cd54b2SKirill A. Shutemov 
630779750d2SKirill A. Shutemov 		ret = split_huge_page(page);
631779750d2SKirill A. Shutemov 		unlock_page(page);
632779750d2SKirill A. Shutemov 		put_page(page);
633779750d2SKirill A. Shutemov 
634b3cd54b2SKirill A. Shutemov 		/* If split failed leave the inode on the list */
635b3cd54b2SKirill A. Shutemov 		if (ret)
636b3cd54b2SKirill A. Shutemov 			goto leave;
637779750d2SKirill A. Shutemov 
638779750d2SKirill A. Shutemov 		split++;
639779750d2SKirill A. Shutemov drop:
640779750d2SKirill A. Shutemov 		list_del_init(&info->shrinklist);
641779750d2SKirill A. Shutemov 		removed++;
642b3cd54b2SKirill A. Shutemov leave:
643779750d2SKirill A. Shutemov 		iput(inode);
644779750d2SKirill A. Shutemov 	}
645779750d2SKirill A. Shutemov 
646779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
647779750d2SKirill A. Shutemov 	list_splice_tail(&list, &sbinfo->shrinklist);
648779750d2SKirill A. Shutemov 	sbinfo->shrinklist_len -= removed;
649779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
650779750d2SKirill A. Shutemov 
651779750d2SKirill A. Shutemov 	return split;
652779750d2SKirill A. Shutemov }
653779750d2SKirill A. Shutemov 
654779750d2SKirill A. Shutemov static long shmem_unused_huge_scan(struct super_block *sb,
655779750d2SKirill A. Shutemov 		struct shrink_control *sc)
656779750d2SKirill A. Shutemov {
657779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
658779750d2SKirill A. Shutemov 
659779750d2SKirill A. Shutemov 	if (!READ_ONCE(sbinfo->shrinklist_len))
660779750d2SKirill A. Shutemov 		return SHRINK_STOP;
661779750d2SKirill A. Shutemov 
662779750d2SKirill A. Shutemov 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
663779750d2SKirill A. Shutemov }
664779750d2SKirill A. Shutemov 
665779750d2SKirill A. Shutemov static long shmem_unused_huge_count(struct super_block *sb,
666779750d2SKirill A. Shutemov 		struct shrink_control *sc)
667779750d2SKirill A. Shutemov {
668779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
669779750d2SKirill A. Shutemov 	return READ_ONCE(sbinfo->shrinklist_len);
670779750d2SKirill A. Shutemov }
671396bcc52SMatthew Wilcox (Oracle) #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
6725a6e75f8SKirill A. Shutemov 
6735a6e75f8SKirill A. Shutemov #define shmem_huge SHMEM_HUGE_DENY
6745a6e75f8SKirill A. Shutemov 
6755e6e5a12SHugh Dickins bool shmem_is_huge(struct vm_area_struct *vma,
6765e6e5a12SHugh Dickins 		   struct inode *inode, pgoff_t index)
6775e6e5a12SHugh Dickins {
6785e6e5a12SHugh Dickins 	return false;
6795e6e5a12SHugh Dickins }
6805e6e5a12SHugh Dickins 
681779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
682779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
683779750d2SKirill A. Shutemov {
684779750d2SKirill A. Shutemov 	return 0;
685779750d2SKirill A. Shutemov }
686396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
6875a6e75f8SKirill A. Shutemov 
6885a6e75f8SKirill A. Shutemov /*
68946f65ec1SHugh Dickins  * Like add_to_page_cache_locked, but error if expected item has gone.
69046f65ec1SHugh Dickins  */
69146f65ec1SHugh Dickins static int shmem_add_to_page_cache(struct page *page,
69246f65ec1SHugh Dickins 				   struct address_space *mapping,
6933fea5a49SJohannes Weiner 				   pgoff_t index, void *expected, gfp_t gfp,
6943fea5a49SJohannes Weiner 				   struct mm_struct *charge_mm)
69546f65ec1SHugh Dickins {
696552446a4SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
697552446a4SMatthew Wilcox 	unsigned long i = 0;
698d8c6546bSMatthew Wilcox (Oracle) 	unsigned long nr = compound_nr(page);
6993fea5a49SJohannes Weiner 	int error;
70046f65ec1SHugh Dickins 
701800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
702800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
703309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageLocked(page), page);
704309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
705800d8c63SKirill A. Shutemov 	VM_BUG_ON(expected && PageTransHuge(page));
70646f65ec1SHugh Dickins 
707800d8c63SKirill A. Shutemov 	page_ref_add(page, nr);
70846f65ec1SHugh Dickins 	page->mapping = mapping;
70946f65ec1SHugh Dickins 	page->index = index;
71046f65ec1SHugh Dickins 
7114c6355b2SJohannes Weiner 	if (!PageSwapCache(page)) {
7128f425e4eSMatthew Wilcox (Oracle) 		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
7133fea5a49SJohannes Weiner 		if (error) {
7144c6355b2SJohannes Weiner 			if (PageTransHuge(page)) {
7153fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK);
7163fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
7173fea5a49SJohannes Weiner 			}
7183fea5a49SJohannes Weiner 			goto error;
7193fea5a49SJohannes Weiner 		}
7204c6355b2SJohannes Weiner 	}
7213fea5a49SJohannes Weiner 	cgroup_throttle_swaprate(page, gfp);
7223fea5a49SJohannes Weiner 
723552446a4SMatthew Wilcox 	do {
724552446a4SMatthew Wilcox 		void *entry;
725552446a4SMatthew Wilcox 		xas_lock_irq(&xas);
726552446a4SMatthew Wilcox 		entry = xas_find_conflict(&xas);
727552446a4SMatthew Wilcox 		if (entry != expected)
728552446a4SMatthew Wilcox 			xas_set_err(&xas, -EEXIST);
729552446a4SMatthew Wilcox 		xas_create_range(&xas);
730552446a4SMatthew Wilcox 		if (xas_error(&xas))
731552446a4SMatthew Wilcox 			goto unlock;
732552446a4SMatthew Wilcox next:
7334101196bSMatthew Wilcox (Oracle) 		xas_store(&xas, page);
734552446a4SMatthew Wilcox 		if (++i < nr) {
735552446a4SMatthew Wilcox 			xas_next(&xas);
736552446a4SMatthew Wilcox 			goto next;
737552446a4SMatthew Wilcox 		}
738800d8c63SKirill A. Shutemov 		if (PageTransHuge(page)) {
739800d8c63SKirill A. Shutemov 			count_vm_event(THP_FILE_ALLOC);
74057b2847dSMuchun Song 			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
741552446a4SMatthew Wilcox 		}
742552446a4SMatthew Wilcox 		mapping->nrpages += nr;
7430d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
7440d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_SHMEM, nr);
745552446a4SMatthew Wilcox unlock:
746552446a4SMatthew Wilcox 		xas_unlock_irq(&xas);
747552446a4SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
748552446a4SMatthew Wilcox 
749552446a4SMatthew Wilcox 	if (xas_error(&xas)) {
7503fea5a49SJohannes Weiner 		error = xas_error(&xas);
7513fea5a49SJohannes Weiner 		goto error;
75246f65ec1SHugh Dickins 	}
753552446a4SMatthew Wilcox 
754552446a4SMatthew Wilcox 	return 0;
7553fea5a49SJohannes Weiner error:
7563fea5a49SJohannes Weiner 	page->mapping = NULL;
7573fea5a49SJohannes Weiner 	page_ref_sub(page, nr);
7583fea5a49SJohannes Weiner 	return error;
75946f65ec1SHugh Dickins }
76046f65ec1SHugh Dickins 
76146f65ec1SHugh Dickins /*
7626922c0c7SHugh Dickins  * Like delete_from_page_cache, but substitutes swap for page.
7636922c0c7SHugh Dickins  */
7646922c0c7SHugh Dickins static void shmem_delete_from_page_cache(struct page *page, void *radswap)
7656922c0c7SHugh Dickins {
7666922c0c7SHugh Dickins 	struct address_space *mapping = page->mapping;
7676922c0c7SHugh Dickins 	int error;
7686922c0c7SHugh Dickins 
769800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
770800d8c63SKirill A. Shutemov 
771b93b0163SMatthew Wilcox 	xa_lock_irq(&mapping->i_pages);
77262f945b6SMatthew Wilcox 	error = shmem_replace_entry(mapping, page->index, page, radswap);
7736922c0c7SHugh Dickins 	page->mapping = NULL;
7746922c0c7SHugh Dickins 	mapping->nrpages--;
7750d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_FILE_PAGES);
7760d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_SHMEM);
777b93b0163SMatthew Wilcox 	xa_unlock_irq(&mapping->i_pages);
77809cbfeafSKirill A. Shutemov 	put_page(page);
7796922c0c7SHugh Dickins 	BUG_ON(error);
7806922c0c7SHugh Dickins }
7816922c0c7SHugh Dickins 
7826922c0c7SHugh Dickins /*
783c121d3bbSMatthew Wilcox  * Remove swap entry from page cache, free the swap and its page cache.
7847a5d0fbbSHugh Dickins  */
7857a5d0fbbSHugh Dickins static int shmem_free_swap(struct address_space *mapping,
7867a5d0fbbSHugh Dickins 			   pgoff_t index, void *radswap)
7877a5d0fbbSHugh Dickins {
7886dbaf22cSJohannes Weiner 	void *old;
7897a5d0fbbSHugh Dickins 
79055f3f7eaSMatthew Wilcox 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
7916dbaf22cSJohannes Weiner 	if (old != radswap)
7926dbaf22cSJohannes Weiner 		return -ENOENT;
7937a5d0fbbSHugh Dickins 	free_swap_and_cache(radix_to_swp_entry(radswap));
7946dbaf22cSJohannes Weiner 	return 0;
7957a5d0fbbSHugh Dickins }
7967a5d0fbbSHugh Dickins 
7977a5d0fbbSHugh Dickins /*
7986a15a370SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
79948131e03SVlastimil Babka  * given offsets are swapped out.
8006a15a370SVlastimil Babka  *
8019608703eSJan Kara  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
8026a15a370SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
8036a15a370SVlastimil Babka  */
80448131e03SVlastimil Babka unsigned long shmem_partial_swap_usage(struct address_space *mapping,
80548131e03SVlastimil Babka 						pgoff_t start, pgoff_t end)
8066a15a370SVlastimil Babka {
8077ae3424fSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
8086a15a370SVlastimil Babka 	struct page *page;
80948131e03SVlastimil Babka 	unsigned long swapped = 0;
8106a15a370SVlastimil Babka 
8116a15a370SVlastimil Babka 	rcu_read_lock();
8127ae3424fSMatthew Wilcox 	xas_for_each(&xas, page, end - 1) {
8137ae3424fSMatthew Wilcox 		if (xas_retry(&xas, page))
8142cf938aaSMatthew Wilcox 			continue;
8153159f943SMatthew Wilcox 		if (xa_is_value(page))
8166a15a370SVlastimil Babka 			swapped++;
8176a15a370SVlastimil Babka 
8186a15a370SVlastimil Babka 		if (need_resched()) {
8197ae3424fSMatthew Wilcox 			xas_pause(&xas);
8206a15a370SVlastimil Babka 			cond_resched_rcu();
8216a15a370SVlastimil Babka 		}
8226a15a370SVlastimil Babka 	}
8236a15a370SVlastimil Babka 
8246a15a370SVlastimil Babka 	rcu_read_unlock();
8256a15a370SVlastimil Babka 
8266a15a370SVlastimil Babka 	return swapped << PAGE_SHIFT;
8276a15a370SVlastimil Babka }
8286a15a370SVlastimil Babka 
8296a15a370SVlastimil Babka /*
83048131e03SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
83148131e03SVlastimil Babka  * given vma is swapped out.
83248131e03SVlastimil Babka  *
8339608703eSJan Kara  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
83448131e03SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
83548131e03SVlastimil Babka  */
83648131e03SVlastimil Babka unsigned long shmem_swap_usage(struct vm_area_struct *vma)
83748131e03SVlastimil Babka {
83848131e03SVlastimil Babka 	struct inode *inode = file_inode(vma->vm_file);
83948131e03SVlastimil Babka 	struct shmem_inode_info *info = SHMEM_I(inode);
84048131e03SVlastimil Babka 	struct address_space *mapping = inode->i_mapping;
84148131e03SVlastimil Babka 	unsigned long swapped;
84248131e03SVlastimil Babka 
84348131e03SVlastimil Babka 	/* Be careful as we don't hold info->lock */
84448131e03SVlastimil Babka 	swapped = READ_ONCE(info->swapped);
84548131e03SVlastimil Babka 
84648131e03SVlastimil Babka 	/*
84748131e03SVlastimil Babka 	 * The easier cases are when the shmem object has nothing in swap, or
84848131e03SVlastimil Babka 	 * the vma maps it whole. Then we can simply use the stats that we
84948131e03SVlastimil Babka 	 * already track.
85048131e03SVlastimil Babka 	 */
85148131e03SVlastimil Babka 	if (!swapped)
85248131e03SVlastimil Babka 		return 0;
85348131e03SVlastimil Babka 
85448131e03SVlastimil Babka 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
85548131e03SVlastimil Babka 		return swapped << PAGE_SHIFT;
85648131e03SVlastimil Babka 
85748131e03SVlastimil Babka 	/* Here comes the more involved part */
85802399c88SPeter Xu 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
85902399c88SPeter Xu 					vma->vm_pgoff + vma_pages(vma));
86048131e03SVlastimil Babka }
86148131e03SVlastimil Babka 
86248131e03SVlastimil Babka /*
86324513264SHugh Dickins  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
86424513264SHugh Dickins  */
86524513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
86624513264SHugh Dickins {
86724513264SHugh Dickins 	struct pagevec pvec;
86824513264SHugh Dickins 	pgoff_t index = 0;
86924513264SHugh Dickins 
87086679820SMel Gorman 	pagevec_init(&pvec);
87124513264SHugh Dickins 	/*
87224513264SHugh Dickins 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
87324513264SHugh Dickins 	 */
87424513264SHugh Dickins 	while (!mapping_unevictable(mapping)) {
87596888e0aSMatthew Wilcox (Oracle) 		if (!pagevec_lookup(&pvec, mapping, &index))
87624513264SHugh Dickins 			break;
87764e3d12fSKuo-Hsin Yang 		check_move_unevictable_pages(&pvec);
87824513264SHugh Dickins 		pagevec_release(&pvec);
87924513264SHugh Dickins 		cond_resched();
88024513264SHugh Dickins 	}
8817a5d0fbbSHugh Dickins }
8827a5d0fbbSHugh Dickins 
8837a5d0fbbSHugh Dickins /*
88471725ed1SHugh Dickins  * Check whether a hole-punch or truncation needs to split a huge page,
88571725ed1SHugh Dickins  * returning true if no split was required, or the split has been successful.
88671725ed1SHugh Dickins  *
88771725ed1SHugh Dickins  * Eviction (or truncation to 0 size) should never need to split a huge page;
88871725ed1SHugh Dickins  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
88971725ed1SHugh Dickins  * head, and then succeeded to trylock on tail.
89071725ed1SHugh Dickins  *
89171725ed1SHugh Dickins  * A split can only succeed when there are no additional references on the
89271725ed1SHugh Dickins  * huge page: so the split below relies upon find_get_entries() having stopped
89371725ed1SHugh Dickins  * when it found a subpage of the huge page, without getting further references.
89471725ed1SHugh Dickins  */
89571725ed1SHugh Dickins static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
89671725ed1SHugh Dickins {
89771725ed1SHugh Dickins 	if (!PageTransCompound(page))
89871725ed1SHugh Dickins 		return true;
89971725ed1SHugh Dickins 
90071725ed1SHugh Dickins 	/* Just proceed to delete a huge page wholly within the range punched */
90171725ed1SHugh Dickins 	if (PageHead(page) &&
90271725ed1SHugh Dickins 	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
90371725ed1SHugh Dickins 		return true;
90471725ed1SHugh Dickins 
90571725ed1SHugh Dickins 	/* Try to split huge page, so we can truly punch the hole or truncate */
90671725ed1SHugh Dickins 	return split_huge_page(page) >= 0;
90771725ed1SHugh Dickins }
90871725ed1SHugh Dickins 
90971725ed1SHugh Dickins /*
9107f4446eeSMatthew Wilcox  * Remove range of pages and swap entries from page cache, and free them.
9111635f6a7SHugh Dickins  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
9127a5d0fbbSHugh Dickins  */
9131635f6a7SHugh Dickins static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
9141635f6a7SHugh Dickins 								 bool unfalloc)
9151da177e4SLinus Torvalds {
916285b2c4fSHugh Dickins 	struct address_space *mapping = inode->i_mapping;
9171da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
91809cbfeafSKirill A. Shutemov 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
91909cbfeafSKirill A. Shutemov 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
92009cbfeafSKirill A. Shutemov 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
92109cbfeafSKirill A. Shutemov 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
922bda97eabSHugh Dickins 	struct pagevec pvec;
9237a5d0fbbSHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
9247a5d0fbbSHugh Dickins 	long nr_swaps_freed = 0;
925285b2c4fSHugh Dickins 	pgoff_t index;
926bda97eabSHugh Dickins 	int i;
9271da177e4SLinus Torvalds 
92883e4fa9cSHugh Dickins 	if (lend == -1)
92983e4fa9cSHugh Dickins 		end = -1;	/* unsigned, so actually very big */
930bda97eabSHugh Dickins 
931d144bf62SHugh Dickins 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
932d144bf62SHugh Dickins 		info->fallocend = start;
933d144bf62SHugh Dickins 
93486679820SMel Gorman 	pagevec_init(&pvec);
935bda97eabSHugh Dickins 	index = start;
9365c211ba2SMatthew Wilcox (Oracle) 	while (index < end && find_lock_entries(mapping, index, end - 1,
9375c211ba2SMatthew Wilcox (Oracle) 			&pvec, indices)) {
938bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
9397b774aabSMatthew Wilcox (Oracle) 			struct folio *folio = (struct folio *)pvec.pages[i];
940bda97eabSHugh Dickins 
9417a5d0fbbSHugh Dickins 			index = indices[i];
942bda97eabSHugh Dickins 
9437b774aabSMatthew Wilcox (Oracle) 			if (xa_is_value(folio)) {
9441635f6a7SHugh Dickins 				if (unfalloc)
9451635f6a7SHugh Dickins 					continue;
9467a5d0fbbSHugh Dickins 				nr_swaps_freed += !shmem_free_swap(mapping,
9477b774aabSMatthew Wilcox (Oracle) 								index, folio);
9487a5d0fbbSHugh Dickins 				continue;
9497a5d0fbbSHugh Dickins 			}
9507b774aabSMatthew Wilcox (Oracle) 			index += folio_nr_pages(folio) - 1;
9517a5d0fbbSHugh Dickins 
9527b774aabSMatthew Wilcox (Oracle) 			if (!unfalloc || !folio_test_uptodate(folio))
953*1e84a3d9SMatthew Wilcox (Oracle) 				truncate_inode_folio(mapping, folio);
9547b774aabSMatthew Wilcox (Oracle) 			folio_unlock(folio);
955bda97eabSHugh Dickins 		}
9560cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
95724513264SHugh Dickins 		pagevec_release(&pvec);
958bda97eabSHugh Dickins 		cond_resched();
959bda97eabSHugh Dickins 		index++;
960bda97eabSHugh Dickins 	}
961bda97eabSHugh Dickins 
96283e4fa9cSHugh Dickins 	if (partial_start) {
963bda97eabSHugh Dickins 		struct page *page = NULL;
9649e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, start - 1, &page, SGP_READ);
965bda97eabSHugh Dickins 		if (page) {
96609cbfeafSKirill A. Shutemov 			unsigned int top = PAGE_SIZE;
96783e4fa9cSHugh Dickins 			if (start > end) {
96883e4fa9cSHugh Dickins 				top = partial_end;
96983e4fa9cSHugh Dickins 				partial_end = 0;
97083e4fa9cSHugh Dickins 			}
97183e4fa9cSHugh Dickins 			zero_user_segment(page, partial_start, top);
972bda97eabSHugh Dickins 			set_page_dirty(page);
973bda97eabSHugh Dickins 			unlock_page(page);
97409cbfeafSKirill A. Shutemov 			put_page(page);
975bda97eabSHugh Dickins 		}
976bda97eabSHugh Dickins 	}
97783e4fa9cSHugh Dickins 	if (partial_end) {
97883e4fa9cSHugh Dickins 		struct page *page = NULL;
9799e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, end, &page, SGP_READ);
98083e4fa9cSHugh Dickins 		if (page) {
98183e4fa9cSHugh Dickins 			zero_user_segment(page, 0, partial_end);
98283e4fa9cSHugh Dickins 			set_page_dirty(page);
98383e4fa9cSHugh Dickins 			unlock_page(page);
98409cbfeafSKirill A. Shutemov 			put_page(page);
98583e4fa9cSHugh Dickins 		}
98683e4fa9cSHugh Dickins 	}
98783e4fa9cSHugh Dickins 	if (start >= end)
98883e4fa9cSHugh Dickins 		return;
989bda97eabSHugh Dickins 
990bda97eabSHugh Dickins 	index = start;
991b1a36650SHugh Dickins 	while (index < end) {
992bda97eabSHugh Dickins 		cond_resched();
9930cd6144aSJohannes Weiner 
994cf2039afSMatthew Wilcox (Oracle) 		if (!find_get_entries(mapping, index, end - 1, &pvec,
995cf2039afSMatthew Wilcox (Oracle) 				indices)) {
996b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
997b1a36650SHugh Dickins 			if (index == start || end != -1)
998bda97eabSHugh Dickins 				break;
999b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
1000bda97eabSHugh Dickins 			index = start;
1001bda97eabSHugh Dickins 			continue;
1002bda97eabSHugh Dickins 		}
1003bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
1004bda97eabSHugh Dickins 			struct page *page = pvec.pages[i];
1005bda97eabSHugh Dickins 
10067a5d0fbbSHugh Dickins 			index = indices[i];
10073159f943SMatthew Wilcox 			if (xa_is_value(page)) {
10081635f6a7SHugh Dickins 				if (unfalloc)
10091635f6a7SHugh Dickins 					continue;
1010b1a36650SHugh Dickins 				if (shmem_free_swap(mapping, index, page)) {
1011b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
1012b1a36650SHugh Dickins 					index--;
1013b1a36650SHugh Dickins 					break;
1014b1a36650SHugh Dickins 				}
1015b1a36650SHugh Dickins 				nr_swaps_freed++;
10167a5d0fbbSHugh Dickins 				continue;
10177a5d0fbbSHugh Dickins 			}
10187a5d0fbbSHugh Dickins 
1019bda97eabSHugh Dickins 			lock_page(page);
1020800d8c63SKirill A. Shutemov 
10211635f6a7SHugh Dickins 			if (!unfalloc || !PageUptodate(page)) {
102271725ed1SHugh Dickins 				if (page_mapping(page) != mapping) {
1023b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
1024b1a36650SHugh Dickins 					unlock_page(page);
1025b1a36650SHugh Dickins 					index--;
1026b1a36650SHugh Dickins 					break;
10277a5d0fbbSHugh Dickins 				}
102871725ed1SHugh Dickins 				VM_BUG_ON_PAGE(PageWriteback(page), page);
102971725ed1SHugh Dickins 				if (shmem_punch_compound(page, start, end))
1030*1e84a3d9SMatthew Wilcox (Oracle) 					truncate_inode_folio(mapping,
1031*1e84a3d9SMatthew Wilcox (Oracle) 							     page_folio(page));
10320783ac95SHugh Dickins 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
103371725ed1SHugh Dickins 					/* Wipe the page and don't get stuck */
103471725ed1SHugh Dickins 					clear_highpage(page);
103571725ed1SHugh Dickins 					flush_dcache_page(page);
103671725ed1SHugh Dickins 					set_page_dirty(page);
103771725ed1SHugh Dickins 					if (index <
103871725ed1SHugh Dickins 					    round_up(start, HPAGE_PMD_NR))
103971725ed1SHugh Dickins 						start = index + 1;
104071725ed1SHugh Dickins 				}
10411635f6a7SHugh Dickins 			}
1042bda97eabSHugh Dickins 			unlock_page(page);
1043bda97eabSHugh Dickins 		}
10440cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
104524513264SHugh Dickins 		pagevec_release(&pvec);
1046bda97eabSHugh Dickins 		index++;
1047bda97eabSHugh Dickins 	}
104894c1e62dSHugh Dickins 
10494595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10507a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10511da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10524595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10531635f6a7SHugh Dickins }
10541da177e4SLinus Torvalds 
10551635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10561635f6a7SHugh Dickins {
10571635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1058078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
10591da177e4SLinus Torvalds }
106094c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
10611da177e4SLinus Torvalds 
1062549c7297SChristian Brauner static int shmem_getattr(struct user_namespace *mnt_userns,
1063549c7297SChristian Brauner 			 const struct path *path, struct kstat *stat,
1064a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
106544a30220SYu Zhao {
1066a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
106744a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
106844a30220SYu Zhao 
1069d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10704595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
107144a30220SYu Zhao 		shmem_recalc_inode(inode);
10724595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1073d0424c42SHugh Dickins 	}
10740d56a451SChristian Brauner 	generic_fillattr(&init_user_ns, inode, stat);
107589fdcd26SYang Shi 
1076a7fddc36SHugh Dickins 	if (shmem_is_huge(NULL, inode, 0))
107789fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
107889fdcd26SYang Shi 
107944a30220SYu Zhao 	return 0;
108044a30220SYu Zhao }
108144a30220SYu Zhao 
1082549c7297SChristian Brauner static int shmem_setattr(struct user_namespace *mnt_userns,
1083549c7297SChristian Brauner 			 struct dentry *dentry, struct iattr *attr)
10841da177e4SLinus Torvalds {
108575c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
108640e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
10871da177e4SLinus Torvalds 	int error;
10881da177e4SLinus Torvalds 
10892f221d6fSChristian Brauner 	error = setattr_prepare(&init_user_ns, dentry, attr);
1090db78b877SChristoph Hellwig 	if (error)
1091db78b877SChristoph Hellwig 		return error;
1092db78b877SChristoph Hellwig 
109394c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
109494c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
109594c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10963889e6e7Snpiggin@suse.de 
10979608703eSJan Kara 		/* protected by i_rwsem */
109840e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
109940e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
110040e041a2SDavid Herrmann 			return -EPERM;
110140e041a2SDavid Herrmann 
110294c1e62dSHugh Dickins 		if (newsize != oldsize) {
110377142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
110477142517SKonstantin Khlebnikov 					oldsize, newsize);
110577142517SKonstantin Khlebnikov 			if (error)
110677142517SKonstantin Khlebnikov 				return error;
110794c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1108078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
110994c1e62dSHugh Dickins 		}
1110afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
111194c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1112d0424c42SHugh Dickins 			if (oldsize > holebegin)
1113d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1114d0424c42SHugh Dickins 							holebegin, 0, 1);
1115d0424c42SHugh Dickins 			if (info->alloced)
1116d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1117d0424c42SHugh Dickins 							newsize, (loff_t)-1);
111894c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1119d0424c42SHugh Dickins 			if (oldsize > holebegin)
1120d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1121d0424c42SHugh Dickins 							holebegin, 0, 1);
112294c1e62dSHugh Dickins 		}
11231da177e4SLinus Torvalds 	}
11241da177e4SLinus Torvalds 
11252f221d6fSChristian Brauner 	setattr_copy(&init_user_ns, inode, attr);
1126db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1127e65ce2a5SChristian Brauner 		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
11281da177e4SLinus Torvalds 	return error;
11291da177e4SLinus Torvalds }
11301da177e4SLinus Torvalds 
11311f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11321da177e4SLinus Torvalds {
11331da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1134779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11351da177e4SLinus Torvalds 
113630e6a51dSHui Su 	if (shmem_mapping(inode->i_mapping)) {
11371da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11381da177e4SLinus Torvalds 		inode->i_size = 0;
11393889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1140779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1141779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1142779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1143779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1144779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1145779750d2SKirill A. Shutemov 			}
1146779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1147779750d2SKirill A. Shutemov 		}
1148af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1149af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1150af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1151af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1152cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1153af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1154af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11551da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1156cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11571da177e4SLinus Torvalds 		}
11583ed47db3SAl Viro 	}
1159b09e0fa4SEric Paris 
116038f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11610f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11625b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1163dbd5768fSJan Kara 	clear_inode(inode);
11641da177e4SLinus Torvalds }
11651da177e4SLinus Torvalds 
1166b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1167b56a2d8aSVineeth Remanan Pillai 				   pgoff_t start, unsigned int nr_entries,
1168b56a2d8aSVineeth Remanan Pillai 				   struct page **entries, pgoff_t *indices,
116987039546SHugh Dickins 				   unsigned int type, bool frontswap)
1170478922e2SMatthew Wilcox {
1171b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1172b56a2d8aSVineeth Remanan Pillai 	struct page *page;
117387039546SHugh Dickins 	swp_entry_t entry;
1174b56a2d8aSVineeth Remanan Pillai 	unsigned int ret = 0;
1175b56a2d8aSVineeth Remanan Pillai 
1176b56a2d8aSVineeth Remanan Pillai 	if (!nr_entries)
1177b56a2d8aSVineeth Remanan Pillai 		return 0;
1178478922e2SMatthew Wilcox 
1179478922e2SMatthew Wilcox 	rcu_read_lock();
1180b56a2d8aSVineeth Remanan Pillai 	xas_for_each(&xas, page, ULONG_MAX) {
1181b56a2d8aSVineeth Remanan Pillai 		if (xas_retry(&xas, page))
11825b9c98f3SMike Kravetz 			continue;
1183b56a2d8aSVineeth Remanan Pillai 
1184b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1185478922e2SMatthew Wilcox 			continue;
1186b56a2d8aSVineeth Remanan Pillai 
118787039546SHugh Dickins 		entry = radix_to_swp_entry(page);
118887039546SHugh Dickins 		if (swp_type(entry) != type)
1189b56a2d8aSVineeth Remanan Pillai 			continue;
119087039546SHugh Dickins 		if (frontswap &&
119187039546SHugh Dickins 		    !frontswap_test(swap_info[type], swp_offset(entry)))
119287039546SHugh Dickins 			continue;
1193b56a2d8aSVineeth Remanan Pillai 
1194b56a2d8aSVineeth Remanan Pillai 		indices[ret] = xas.xa_index;
1195b56a2d8aSVineeth Remanan Pillai 		entries[ret] = page;
1196b56a2d8aSVineeth Remanan Pillai 
1197b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1198e21a2955SMatthew Wilcox 			xas_pause(&xas);
1199478922e2SMatthew Wilcox 			cond_resched_rcu();
1200478922e2SMatthew Wilcox 		}
1201b56a2d8aSVineeth Remanan Pillai 		if (++ret == nr_entries)
1202b56a2d8aSVineeth Remanan Pillai 			break;
1203b56a2d8aSVineeth Remanan Pillai 	}
1204478922e2SMatthew Wilcox 	rcu_read_unlock();
1205e21a2955SMatthew Wilcox 
1206b56a2d8aSVineeth Remanan Pillai 	return ret;
1207b56a2d8aSVineeth Remanan Pillai }
1208b56a2d8aSVineeth Remanan Pillai 
1209b56a2d8aSVineeth Remanan Pillai /*
1210b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1211b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1212b56a2d8aSVineeth Remanan Pillai  */
1213b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1214b56a2d8aSVineeth Remanan Pillai 				    pgoff_t *indices)
1215b56a2d8aSVineeth Remanan Pillai {
1216b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1217b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1218b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1219b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1220b56a2d8aSVineeth Remanan Pillai 
1221b56a2d8aSVineeth Remanan Pillai 	for (i = 0; i < pvec.nr; i++) {
1222b56a2d8aSVineeth Remanan Pillai 		struct page *page = pvec.pages[i];
1223b56a2d8aSVineeth Remanan Pillai 
1224b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1225b56a2d8aSVineeth Remanan Pillai 			continue;
1226b56a2d8aSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, indices[i],
1227b56a2d8aSVineeth Remanan Pillai 					  &page, SGP_CACHE,
1228b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1229b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1230b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1231b56a2d8aSVineeth Remanan Pillai 			unlock_page(page);
1232b56a2d8aSVineeth Remanan Pillai 			put_page(page);
1233b56a2d8aSVineeth Remanan Pillai 			ret++;
1234b56a2d8aSVineeth Remanan Pillai 		}
1235b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1236b56a2d8aSVineeth Remanan Pillai 			break;
1237b56a2d8aSVineeth Remanan Pillai 		error = 0;
1238b56a2d8aSVineeth Remanan Pillai 	}
1239b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1240478922e2SMatthew Wilcox }
1241478922e2SMatthew Wilcox 
124246f65ec1SHugh Dickins /*
124346f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
124446f65ec1SHugh Dickins  */
1245b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1246b56a2d8aSVineeth Remanan Pillai 			     bool frontswap, unsigned long *fs_pages_to_unuse)
12471da177e4SLinus Torvalds {
1248b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1249b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1250b56a2d8aSVineeth Remanan Pillai 	struct pagevec pvec;
1251b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1252b56a2d8aSVineeth Remanan Pillai 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1253b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12541da177e4SLinus Torvalds 
1255b56a2d8aSVineeth Remanan Pillai 	pagevec_init(&pvec);
1256b56a2d8aSVineeth Remanan Pillai 	do {
1257b56a2d8aSVineeth Remanan Pillai 		unsigned int nr_entries = PAGEVEC_SIZE;
12582e0e26c7SHugh Dickins 
1259b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1260b56a2d8aSVineeth Remanan Pillai 			nr_entries = *fs_pages_to_unuse;
12612e0e26c7SHugh Dickins 
1262b56a2d8aSVineeth Remanan Pillai 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1263b56a2d8aSVineeth Remanan Pillai 						  pvec.pages, indices,
126487039546SHugh Dickins 						  type, frontswap);
1265b56a2d8aSVineeth Remanan Pillai 		if (pvec.nr == 0) {
1266b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1267778dd893SHugh Dickins 			break;
1268b56a2d8aSVineeth Remanan Pillai 		}
1269b56a2d8aSVineeth Remanan Pillai 
1270b56a2d8aSVineeth Remanan Pillai 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1271b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1272b56a2d8aSVineeth Remanan Pillai 			break;
1273b56a2d8aSVineeth Remanan Pillai 
1274b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial) {
1275b56a2d8aSVineeth Remanan Pillai 			*fs_pages_to_unuse -= ret;
1276b56a2d8aSVineeth Remanan Pillai 			if (*fs_pages_to_unuse == 0) {
1277b56a2d8aSVineeth Remanan Pillai 				ret = FRONTSWAP_PAGES_UNUSED;
1278b56a2d8aSVineeth Remanan Pillai 				break;
1279b56a2d8aSVineeth Remanan Pillai 			}
1280b56a2d8aSVineeth Remanan Pillai 		}
1281b56a2d8aSVineeth Remanan Pillai 
1282b56a2d8aSVineeth Remanan Pillai 		start = indices[pvec.nr - 1];
1283b56a2d8aSVineeth Remanan Pillai 	} while (true);
1284b56a2d8aSVineeth Remanan Pillai 
1285b56a2d8aSVineeth Remanan Pillai 	return ret;
1286b56a2d8aSVineeth Remanan Pillai }
1287b56a2d8aSVineeth Remanan Pillai 
1288b56a2d8aSVineeth Remanan Pillai /*
1289b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1290b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1291b56a2d8aSVineeth Remanan Pillai  * unused.
1292b56a2d8aSVineeth Remanan Pillai  */
1293b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
1294b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
1295b56a2d8aSVineeth Remanan Pillai {
1296b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1297b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1298b56a2d8aSVineeth Remanan Pillai 
1299b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1300b56a2d8aSVineeth Remanan Pillai 		return 0;
1301b56a2d8aSVineeth Remanan Pillai 
1302b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1303b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1304b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1305b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1306b56a2d8aSVineeth Remanan Pillai 			continue;
1307b56a2d8aSVineeth Remanan Pillai 		}
1308af53d3e9SHugh Dickins 		/*
1309af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1310af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1311af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1312af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1313af53d3e9SHugh Dickins 		 */
1314af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1315b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1316b56a2d8aSVineeth Remanan Pillai 
1317af53d3e9SHugh Dickins 		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1318b56a2d8aSVineeth Remanan Pillai 					  fs_pages_to_unuse);
1319b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1320b56a2d8aSVineeth Remanan Pillai 
1321b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1322b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1323b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1324b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1325af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1326af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1327b56a2d8aSVineeth Remanan Pillai 		if (error)
1328b56a2d8aSVineeth Remanan Pillai 			break;
13291da177e4SLinus Torvalds 	}
1330cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1331778dd893SHugh Dickins 
1332778dd893SHugh Dickins 	return error;
13331da177e4SLinus Torvalds }
13341da177e4SLinus Torvalds 
13351da177e4SLinus Torvalds /*
13361da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13371da177e4SLinus Torvalds  */
13381da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13391da177e4SLinus Torvalds {
13401da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13411da177e4SLinus Torvalds 	struct address_space *mapping;
13421da177e4SLinus Torvalds 	struct inode *inode;
13436922c0c7SHugh Dickins 	swp_entry_t swap;
13446922c0c7SHugh Dickins 	pgoff_t index;
13451da177e4SLinus Torvalds 
13461e6decf3SHugh Dickins 	/*
13471e6decf3SHugh Dickins 	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
13481e6decf3SHugh Dickins 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
13491e6decf3SHugh Dickins 	 * and its shmem_writeback() needs them to be split when swapping.
13501e6decf3SHugh Dickins 	 */
13511e6decf3SHugh Dickins 	if (PageTransCompound(page)) {
13521e6decf3SHugh Dickins 		/* Ensure the subpages are still dirty */
13531e6decf3SHugh Dickins 		SetPageDirty(page);
13541e6decf3SHugh Dickins 		if (split_huge_page(page) < 0)
13551e6decf3SHugh Dickins 			goto redirty;
13561e6decf3SHugh Dickins 		ClearPageDirty(page);
13571e6decf3SHugh Dickins 	}
13581e6decf3SHugh Dickins 
13591da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13601da177e4SLinus Torvalds 	mapping = page->mapping;
13611da177e4SLinus Torvalds 	index = page->index;
13621da177e4SLinus Torvalds 	inode = mapping->host;
13631da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13641da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13651da177e4SLinus Torvalds 		goto redirty;
1366d9fe526aSHugh Dickins 	if (!total_swap_pages)
13671da177e4SLinus Torvalds 		goto redirty;
13681da177e4SLinus Torvalds 
1369d9fe526aSHugh Dickins 	/*
137097b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
137197b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
137297b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
137397b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
137497b713baSChristoph Hellwig 	 * threads or sync.
1375d9fe526aSHugh Dickins 	 */
137648f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
137748f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
137848f170fbSHugh Dickins 		goto redirty;
137948f170fbSHugh Dickins 	}
13801635f6a7SHugh Dickins 
13811635f6a7SHugh Dickins 	/*
13821635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13831635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13841635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13851aac1400SHugh Dickins 	 *
13861aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13871aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13881aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13891aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13901aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13911635f6a7SHugh Dickins 	 */
13921635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13931aac1400SHugh Dickins 		if (inode->i_private) {
13941aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13951aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13961aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13971aac1400SHugh Dickins 			if (shmem_falloc &&
13988e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13991aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
14001aac1400SHugh Dickins 			    index < shmem_falloc->next)
14011aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
14021aac1400SHugh Dickins 			else
14031aac1400SHugh Dickins 				shmem_falloc = NULL;
14041aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
14051aac1400SHugh Dickins 			if (shmem_falloc)
14061aac1400SHugh Dickins 				goto redirty;
14071aac1400SHugh Dickins 		}
14081635f6a7SHugh Dickins 		clear_highpage(page);
14091635f6a7SHugh Dickins 		flush_dcache_page(page);
14101635f6a7SHugh Dickins 		SetPageUptodate(page);
14111635f6a7SHugh Dickins 	}
14121635f6a7SHugh Dickins 
141338d8b4e6SHuang Ying 	swap = get_swap_page(page);
141448f170fbSHugh Dickins 	if (!swap.val)
141548f170fbSHugh Dickins 		goto redirty;
1416d9fe526aSHugh Dickins 
1417b1dea800SHugh Dickins 	/*
1418b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
14196922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
14206922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1421b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
14226922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
14236922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1424b1dea800SHugh Dickins 	 */
1425b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
142605bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1427b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1428b1dea800SHugh Dickins 
14294afab1cdSYang Shi 	if (add_to_swap_cache(page, swap,
14303852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
14313852f676SJoonsoo Kim 			NULL) == 0) {
14324595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1433267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1434267a4c76SHugh Dickins 		info->swapped++;
14354595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1436267a4c76SHugh Dickins 
1437aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14386922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
14396922c0c7SHugh Dickins 
14406922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1441d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
14429fab5619SHugh Dickins 		swap_writepage(page, wbc);
14431da177e4SLinus Torvalds 		return 0;
14441da177e4SLinus Torvalds 	}
14451da177e4SLinus Torvalds 
14466922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
144775f6d6d2SMinchan Kim 	put_swap_page(page, swap);
14481da177e4SLinus Torvalds redirty:
14491da177e4SLinus Torvalds 	set_page_dirty(page);
1450d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1451d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1452d9fe526aSHugh Dickins 	unlock_page(page);
1453d9fe526aSHugh Dickins 	return 0;
14541da177e4SLinus Torvalds }
14551da177e4SLinus Torvalds 
145675edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
145771fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1458680d794bSakpm@linux-foundation.org {
1459680d794bSakpm@linux-foundation.org 	char buffer[64];
1460680d794bSakpm@linux-foundation.org 
146171fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1462095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1463095f1fc4SLee Schermerhorn 
1464a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1465095f1fc4SLee Schermerhorn 
1466095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1467680d794bSakpm@linux-foundation.org }
146871fe804bSLee Schermerhorn 
146971fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
147071fe804bSLee Schermerhorn {
147171fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
147271fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
1473bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
147471fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
147571fe804bSLee Schermerhorn 		mpol_get(mpol);
1476bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
147771fe804bSLee Schermerhorn 	}
147871fe804bSLee Schermerhorn 	return mpol;
147971fe804bSLee Schermerhorn }
148075edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
148175edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
148275edd345SHugh Dickins {
148375edd345SHugh Dickins }
148475edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
148575edd345SHugh Dickins {
148675edd345SHugh Dickins 	return NULL;
148775edd345SHugh Dickins }
148875edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
148975edd345SHugh Dickins #ifndef CONFIG_NUMA
149075edd345SHugh Dickins #define vm_policy vm_private_data
149175edd345SHugh Dickins #endif
1492680d794bSakpm@linux-foundation.org 
1493800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1494800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1495800d8c63SKirill A. Shutemov {
1496800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14972c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1498800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1499800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1500800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1501800d8c63SKirill A. Shutemov }
1502800d8c63SKirill A. Shutemov 
1503800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1504800d8c63SKirill A. Shutemov {
1505800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1506800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1507800d8c63SKirill A. Shutemov }
1508800d8c63SKirill A. Shutemov 
150941ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
151041ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
15111da177e4SLinus Torvalds {
15121da177e4SLinus Torvalds 	struct vm_area_struct pvma;
151318a2f371SMel Gorman 	struct page *page;
15148c63ca5bSWill Deacon 	struct vm_fault vmf = {
15158c63ca5bSWill Deacon 		.vma = &pvma,
15168c63ca5bSWill Deacon 	};
15171da177e4SLinus Torvalds 
1518800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1519e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1520800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
152118a2f371SMel Gorman 
1522800d8c63SKirill A. Shutemov 	return page;
1523800d8c63SKirill A. Shutemov }
152418a2f371SMel Gorman 
152578cc8cdcSRik van Riel /*
152678cc8cdcSRik van Riel  * Make sure huge_gfp is always more limited than limit_gfp.
152778cc8cdcSRik van Riel  * Some of the flags set permissions, while others set limitations.
152878cc8cdcSRik van Riel  */
152978cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
153078cc8cdcSRik van Riel {
153178cc8cdcSRik van Riel 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
153278cc8cdcSRik van Riel 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1533187df5ddSRik van Riel 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1534187df5ddSRik van Riel 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1535187df5ddSRik van Riel 
1536187df5ddSRik van Riel 	/* Allow allocations only from the originally specified zones. */
1537187df5ddSRik van Riel 	result |= zoneflags;
153878cc8cdcSRik van Riel 
153978cc8cdcSRik van Riel 	/*
154078cc8cdcSRik van Riel 	 * Minimize the result gfp by taking the union with the deny flags,
154178cc8cdcSRik van Riel 	 * and the intersection of the allow flags.
154278cc8cdcSRik van Riel 	 */
154378cc8cdcSRik van Riel 	result |= (limit_gfp & denyflags);
154478cc8cdcSRik van Riel 	result |= (huge_gfp & limit_gfp) & allowflags;
154578cc8cdcSRik van Riel 
154678cc8cdcSRik van Riel 	return result;
154778cc8cdcSRik van Riel }
154878cc8cdcSRik van Riel 
1549800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1550800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1551800d8c63SKirill A. Shutemov {
1552800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15537b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15547b8d046fSMatthew Wilcox 	pgoff_t hindex;
1555800d8c63SKirill A. Shutemov 	struct page *page;
1556800d8c63SKirill A. Shutemov 
15574620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15587b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15597b8d046fSMatthew Wilcox 								XA_PRESENT))
1560800d8c63SKirill A. Shutemov 		return NULL;
1561800d8c63SKirill A. Shutemov 
1562800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1563164cc4feSRik van Riel 	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
1564164cc4feSRik van Riel 			       true);
1565800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1566800d8c63SKirill A. Shutemov 	if (page)
1567800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
1568dcdf11eeSDavid Rientjes 	else
1569dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
157018a2f371SMel Gorman 	return page;
157118a2f371SMel Gorman }
157218a2f371SMel Gorman 
157318a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
157418a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
157518a2f371SMel Gorman {
157618a2f371SMel Gorman 	struct vm_area_struct pvma;
157718a2f371SMel Gorman 	struct page *page;
157818a2f371SMel Gorman 
1579800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1580800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1581800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
158218a2f371SMel Gorman 
1583800d8c63SKirill A. Shutemov 	return page;
1584800d8c63SKirill A. Shutemov }
1585800d8c63SKirill A. Shutemov 
1586800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
15870f079694SMike Rapoport 		struct inode *inode,
1588800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1589800d8c63SKirill A. Shutemov {
15900f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1591800d8c63SKirill A. Shutemov 	struct page *page;
1592800d8c63SKirill A. Shutemov 	int nr;
1593800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1594800d8c63SKirill A. Shutemov 
1595396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1596800d8c63SKirill A. Shutemov 		huge = false;
1597800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1598800d8c63SKirill A. Shutemov 
15990f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1600800d8c63SKirill A. Shutemov 		goto failed;
1601800d8c63SKirill A. Shutemov 
1602800d8c63SKirill A. Shutemov 	if (huge)
1603800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1604800d8c63SKirill A. Shutemov 	else
1605800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
160675edd345SHugh Dickins 	if (page) {
160775edd345SHugh Dickins 		__SetPageLocked(page);
160875edd345SHugh Dickins 		__SetPageSwapBacked(page);
1609800d8c63SKirill A. Shutemov 		return page;
161075edd345SHugh Dickins 	}
161118a2f371SMel Gorman 
1612800d8c63SKirill A. Shutemov 	err = -ENOMEM;
16130f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1614800d8c63SKirill A. Shutemov failed:
1615800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
16161da177e4SLinus Torvalds }
161771fe804bSLee Schermerhorn 
16181da177e4SLinus Torvalds /*
1619bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1620bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1621bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1622bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1623bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1624bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1625bde05d1cSHugh Dickins  *
1626bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1627bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1628bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1629bde05d1cSHugh Dickins  */
1630bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1631bde05d1cSHugh Dickins {
1632bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1633bde05d1cSHugh Dickins }
1634bde05d1cSHugh Dickins 
1635bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1636bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1637bde05d1cSHugh Dickins {
1638bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1639d21bba2bSMatthew Wilcox (Oracle) 	struct folio *old, *new;
1640bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1641c1cb20d4SYu Zhao 	swp_entry_t entry;
1642bde05d1cSHugh Dickins 	pgoff_t swap_index;
1643bde05d1cSHugh Dickins 	int error;
1644bde05d1cSHugh Dickins 
1645bde05d1cSHugh Dickins 	oldpage = *pagep;
1646c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1647c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1648bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1649bde05d1cSHugh Dickins 
1650bde05d1cSHugh Dickins 	/*
1651bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1652bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1653bde05d1cSHugh Dickins 	 */
1654bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1655bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1656bde05d1cSHugh Dickins 	if (!newpage)
1657bde05d1cSHugh Dickins 		return -ENOMEM;
1658bde05d1cSHugh Dickins 
165909cbfeafSKirill A. Shutemov 	get_page(newpage);
1660bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
16610142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1662bde05d1cSHugh Dickins 
16639956edf3SHugh Dickins 	__SetPageLocked(newpage);
16649956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1665bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1666c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1667bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1668bde05d1cSHugh Dickins 
1669bde05d1cSHugh Dickins 	/*
1670bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1671bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1672bde05d1cSHugh Dickins 	 */
1673b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
167462f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
16750142ef6cSHugh Dickins 	if (!error) {
1676d21bba2bSMatthew Wilcox (Oracle) 		old = page_folio(oldpage);
1677d21bba2bSMatthew Wilcox (Oracle) 		new = page_folio(newpage);
1678d21bba2bSMatthew Wilcox (Oracle) 		mem_cgroup_migrate(old, new);
16790d1c2072SJohannes Weiner 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
16800d1c2072SJohannes Weiner 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
16810142ef6cSHugh Dickins 	}
1682b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1683bde05d1cSHugh Dickins 
16840142ef6cSHugh Dickins 	if (unlikely(error)) {
16850142ef6cSHugh Dickins 		/*
16860142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16870142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16880142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16890142ef6cSHugh Dickins 		 */
16900142ef6cSHugh Dickins 		oldpage = newpage;
16910142ef6cSHugh Dickins 	} else {
16926058eaecSJohannes Weiner 		lru_cache_add(newpage);
16930142ef6cSHugh Dickins 		*pagep = newpage;
16940142ef6cSHugh Dickins 	}
1695bde05d1cSHugh Dickins 
1696bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1697bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1698bde05d1cSHugh Dickins 
1699bde05d1cSHugh Dickins 	unlock_page(oldpage);
170009cbfeafSKirill A. Shutemov 	put_page(oldpage);
170109cbfeafSKirill A. Shutemov 	put_page(oldpage);
17020142ef6cSHugh Dickins 	return error;
1703bde05d1cSHugh Dickins }
1704bde05d1cSHugh Dickins 
1705bde05d1cSHugh Dickins /*
1706c5bf121eSVineeth Remanan Pillai  * Swap in the page pointed to by *pagep.
1707c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *pagep contains a valid swapped page.
1708c5bf121eSVineeth Remanan Pillai  * Returns 0 and the page in pagep if success. On failure, returns the
1709af44c12fSRandy Dunlap  * error code and NULL in *pagep.
17101da177e4SLinus Torvalds  */
1711c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1712c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
1713c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
17142b740303SSouptick Joarder 			     vm_fault_t *fault_type)
17151da177e4SLinus Torvalds {
17161da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
171723f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
171804f94e3fSDan Schatzberg 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1719b1e1ef34SYang Shi 	struct page *page;
17201da177e4SLinus Torvalds 	swp_entry_t swap;
17211da177e4SLinus Torvalds 	int error;
17221da177e4SLinus Torvalds 
1723c5bf121eSVineeth Remanan Pillai 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1724c5bf121eSVineeth Remanan Pillai 	swap = radix_to_swp_entry(*pagep);
1725c5bf121eSVineeth Remanan Pillai 	*pagep = NULL;
172654af6042SHugh Dickins 
17271da177e4SLinus Torvalds 	/* Look it up and read it in.. */
1728ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
172927ab7006SHugh Dickins 	if (!page) {
17309e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17319e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
173268da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17339e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17342262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17359e18eb29SAndres Lagar-Cavilla 		}
17369e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
173741ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
173827ab7006SHugh Dickins 		if (!page) {
17391da177e4SLinus Torvalds 			error = -ENOMEM;
174054af6042SHugh Dickins 			goto failed;
1741285b2c4fSHugh Dickins 		}
17421da177e4SLinus Torvalds 	}
17431da177e4SLinus Torvalds 
17441da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
174554af6042SHugh Dickins 	lock_page(page);
17460142ef6cSHugh Dickins 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1747d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1748c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1749d1899228SHugh Dickins 		goto unlock;
1750bde05d1cSHugh Dickins 	}
175127ab7006SHugh Dickins 	if (!PageUptodate(page)) {
17521da177e4SLinus Torvalds 		error = -EIO;
175354af6042SHugh Dickins 		goto failed;
175454af6042SHugh Dickins 	}
175554af6042SHugh Dickins 	wait_on_page_writeback(page);
175654af6042SHugh Dickins 
17578a84802eSSteven Price 	/*
17588a84802eSSteven Price 	 * Some architectures may have to restore extra metadata to the
17598a84802eSSteven Price 	 * physical page after reading from swap.
17608a84802eSSteven Price 	 */
17618a84802eSSteven Price 	arch_swap_restore(swap, page);
17628a84802eSSteven Price 
1763bde05d1cSHugh Dickins 	if (shmem_should_replace_page(page, gfp)) {
1764bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1765bde05d1cSHugh Dickins 		if (error)
176654af6042SHugh Dickins 			goto failed;
17671da177e4SLinus Torvalds 	}
17681da177e4SLinus Torvalds 
17693fea5a49SJohannes Weiner 	error = shmem_add_to_page_cache(page, mapping, index,
17703fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17713fea5a49SJohannes Weiner 					charge_mm);
177254af6042SHugh Dickins 	if (error)
177354af6042SHugh Dickins 		goto failed;
177454af6042SHugh Dickins 
17754595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
177654af6042SHugh Dickins 	info->swapped--;
177754af6042SHugh Dickins 	shmem_recalc_inode(inode);
17784595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
177927ab7006SHugh Dickins 
178066d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
178166d2f4d2SHugh Dickins 		mark_page_accessed(page);
178266d2f4d2SHugh Dickins 
178327ab7006SHugh Dickins 	delete_from_swap_cache(page);
178427ab7006SHugh Dickins 	set_page_dirty(page);
178527ab7006SHugh Dickins 	swap_free(swap);
178627ab7006SHugh Dickins 
1787c5bf121eSVineeth Remanan Pillai 	*pagep = page;
1788c5bf121eSVineeth Remanan Pillai 	return 0;
1789c5bf121eSVineeth Remanan Pillai failed:
1790c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1791c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1792c5bf121eSVineeth Remanan Pillai unlock:
1793c5bf121eSVineeth Remanan Pillai 	if (page) {
1794c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1795c5bf121eSVineeth Remanan Pillai 		put_page(page);
1796c5bf121eSVineeth Remanan Pillai 	}
1797c5bf121eSVineeth Remanan Pillai 
1798c5bf121eSVineeth Remanan Pillai 	return error;
1799c5bf121eSVineeth Remanan Pillai }
1800c5bf121eSVineeth Remanan Pillai 
1801c5bf121eSVineeth Remanan Pillai /*
1802c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1803c5bf121eSVineeth Remanan Pillai  *
1804c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1805c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty since we also free the swap
1806c5bf121eSVineeth Remanan Pillai  * entry since a page cannot live in both the swap and page cache.
1807c5bf121eSVineeth Remanan Pillai  *
1808c949b097SAxel Rasmussen  * vma, vmf, and fault_type are only supplied by shmem_fault:
1809c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1810c5bf121eSVineeth Remanan Pillai  */
1811c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1812c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1813c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1814c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1815c5bf121eSVineeth Remanan Pillai {
1816c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1817c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1818c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1819c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1820c5bf121eSVineeth Remanan Pillai 	struct page *page;
1821c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1822164cc4feSRik van Riel 	gfp_t huge_gfp;
1823c5bf121eSVineeth Remanan Pillai 	int error;
1824c5bf121eSVineeth Remanan Pillai 	int once = 0;
1825c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1826c5bf121eSVineeth Remanan Pillai 
1827c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1828c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1829c5bf121eSVineeth Remanan Pillai repeat:
1830c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1831c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1832c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1833c5bf121eSVineeth Remanan Pillai 	}
1834c5bf121eSVineeth Remanan Pillai 
1835c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
183604f94e3fSDan Schatzberg 	charge_mm = vma ? vma->vm_mm : NULL;
1837c5bf121eSVineeth Remanan Pillai 
183844835d20SMatthew Wilcox (Oracle) 	page = pagecache_get_page(mapping, index,
183944835d20SMatthew Wilcox (Oracle) 					FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1840c949b097SAxel Rasmussen 
1841c949b097SAxel Rasmussen 	if (page && vma && userfaultfd_minor(vma)) {
1842c949b097SAxel Rasmussen 		if (!xa_is_value(page)) {
1843c949b097SAxel Rasmussen 			unlock_page(page);
1844c949b097SAxel Rasmussen 			put_page(page);
1845c949b097SAxel Rasmussen 		}
1846c949b097SAxel Rasmussen 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1847c949b097SAxel Rasmussen 		return 0;
1848c949b097SAxel Rasmussen 	}
1849c949b097SAxel Rasmussen 
1850c5bf121eSVineeth Remanan Pillai 	if (xa_is_value(page)) {
1851c5bf121eSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, index, &page,
1852c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1853c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1854c5bf121eSVineeth Remanan Pillai 			goto repeat;
1855c5bf121eSVineeth Remanan Pillai 
1856c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1857c5bf121eSVineeth Remanan Pillai 		return error;
1858c5bf121eSVineeth Remanan Pillai 	}
1859c5bf121eSVineeth Remanan Pillai 
1860acdd9f8eSHugh Dickins 	if (page) {
186163ec1973SMatthew Wilcox (Oracle) 		hindex = page->index;
1862acdd9f8eSHugh Dickins 		if (sgp == SGP_WRITE)
1863c5bf121eSVineeth Remanan Pillai 			mark_page_accessed(page);
1864acdd9f8eSHugh Dickins 		if (PageUptodate(page))
1865acdd9f8eSHugh Dickins 			goto out;
1866acdd9f8eSHugh Dickins 		/* fallocated page */
1867c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1868c5bf121eSVineeth Remanan Pillai 			goto clear;
1869c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1870c5bf121eSVineeth Remanan Pillai 		put_page(page);
1871c5bf121eSVineeth Remanan Pillai 	}
1872c5bf121eSVineeth Remanan Pillai 
1873c5bf121eSVineeth Remanan Pillai 	/*
1874acdd9f8eSHugh Dickins 	 * SGP_READ: succeed on hole, with NULL page, letting caller zero.
1875acdd9f8eSHugh Dickins 	 * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
1876acdd9f8eSHugh Dickins 	 */
1877acdd9f8eSHugh Dickins 	*pagep = NULL;
1878acdd9f8eSHugh Dickins 	if (sgp == SGP_READ)
1879acdd9f8eSHugh Dickins 		return 0;
1880acdd9f8eSHugh Dickins 	if (sgp == SGP_NOALLOC)
1881acdd9f8eSHugh Dickins 		return -ENOENT;
1882acdd9f8eSHugh Dickins 
1883acdd9f8eSHugh Dickins 	/*
1884acdd9f8eSHugh Dickins 	 * Fast cache lookup and swap lookup did not find it: allocate.
1885c5bf121eSVineeth Remanan Pillai 	 */
1886c5bf121eSVineeth Remanan Pillai 
1887cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1888cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1889cfda0526SMike Rapoport 		return 0;
1890cfda0526SMike Rapoport 	}
1891cfda0526SMike Rapoport 
18925e6e5a12SHugh Dickins 	/* Never use a huge page for shmem_symlink() */
18935e6e5a12SHugh Dickins 	if (S_ISLNK(inode->i_mode))
1894800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
18955e6e5a12SHugh Dickins 	if (!shmem_is_huge(vma, inode, index))
1896800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
189727d80fa2SKees Cook 
1898164cc4feSRik van Riel 	huge_gfp = vma_thp_gfp_mask(vma);
189978cc8cdcSRik van Riel 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1900164cc4feSRik van Riel 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1901800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1902c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1903c5bf121eSVineeth Remanan Pillai 		page = shmem_alloc_and_acct_page(gfp, inode,
1904800d8c63SKirill A. Shutemov 						 index, false);
190554af6042SHugh Dickins 	}
1906800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1907779750d2SKirill A. Shutemov 		int retry = 5;
1908c5bf121eSVineeth Remanan Pillai 
1909800d8c63SKirill A. Shutemov 		error = PTR_ERR(page);
1910800d8c63SKirill A. Shutemov 		page = NULL;
1911779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1912c5bf121eSVineeth Remanan Pillai 			goto unlock;
1913779750d2SKirill A. Shutemov 		/*
1914c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1915779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1916779750d2SKirill A. Shutemov 		 */
1917779750d2SKirill A. Shutemov 		while (retry--) {
1918779750d2SKirill A. Shutemov 			int ret;
1919c5bf121eSVineeth Remanan Pillai 
1920779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1921779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1922779750d2SKirill A. Shutemov 				break;
1923779750d2SKirill A. Shutemov 			if (ret)
1924779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1925779750d2SKirill A. Shutemov 		}
1926c5bf121eSVineeth Remanan Pillai 		goto unlock;
1927800d8c63SKirill A. Shutemov 	}
1928800d8c63SKirill A. Shutemov 
1929800d8c63SKirill A. Shutemov 	if (PageTransHuge(page))
1930800d8c63SKirill A. Shutemov 		hindex = round_down(index, HPAGE_PMD_NR);
1931800d8c63SKirill A. Shutemov 	else
1932800d8c63SKirill A. Shutemov 		hindex = index;
1933800d8c63SKirill A. Shutemov 
193466d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1935eb39d618SHugh Dickins 		__SetPageReferenced(page);
193666d2f4d2SHugh Dickins 
1937800d8c63SKirill A. Shutemov 	error = shmem_add_to_page_cache(page, mapping, hindex,
19383fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19393fea5a49SJohannes Weiner 					charge_mm);
19403fea5a49SJohannes Weiner 	if (error)
1941800d8c63SKirill A. Shutemov 		goto unacct;
19426058eaecSJohannes Weiner 	lru_cache_add(page);
194354af6042SHugh Dickins 
19444595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1945d8c6546bSMatthew Wilcox (Oracle) 	info->alloced += compound_nr(page);
1946800d8c63SKirill A. Shutemov 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
194754af6042SHugh Dickins 	shmem_recalc_inode(inode);
19484595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19491635f6a7SHugh Dickins 	alloced = true;
195054af6042SHugh Dickins 
1951779750d2SKirill A. Shutemov 	if (PageTransHuge(page) &&
1952779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1953779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1954779750d2SKirill A. Shutemov 		/*
1955779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1956779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1957779750d2SKirill A. Shutemov 		 */
1958779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1959d041353dSCong Wang 		/*
1960d041353dSCong Wang 		 * _careful to defend against unlocked access to
1961d041353dSCong Wang 		 * ->shrink_list in shmem_unused_huge_shrink()
1962d041353dSCong Wang 		 */
1963d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1964779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1965779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1966779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1967779750d2SKirill A. Shutemov 		}
1968779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1969779750d2SKirill A. Shutemov 	}
1970779750d2SKirill A. Shutemov 
1971ec9516fbSHugh Dickins 	/*
19721635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19731635f6a7SHugh Dickins 	 */
19741635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19751635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19761635f6a7SHugh Dickins clear:
19771635f6a7SHugh Dickins 	/*
19781635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19791635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19801635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1981ec9516fbSHugh Dickins 	 */
1982800d8c63SKirill A. Shutemov 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1983800d8c63SKirill A. Shutemov 		int i;
1984800d8c63SKirill A. Shutemov 
198563ec1973SMatthew Wilcox (Oracle) 		for (i = 0; i < compound_nr(page); i++) {
198663ec1973SMatthew Wilcox (Oracle) 			clear_highpage(page + i);
198763ec1973SMatthew Wilcox (Oracle) 			flush_dcache_page(page + i);
1988800d8c63SKirill A. Shutemov 		}
198963ec1973SMatthew Wilcox (Oracle) 		SetPageUptodate(page);
1990ec9516fbSHugh Dickins 	}
1991bde05d1cSHugh Dickins 
199254af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
199375edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
199409cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1995267a4c76SHugh Dickins 		if (alloced) {
1996267a4c76SHugh Dickins 			ClearPageDirty(page);
1997267a4c76SHugh Dickins 			delete_from_page_cache(page);
19984595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1999267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
20004595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
2001267a4c76SHugh Dickins 		}
200254af6042SHugh Dickins 		error = -EINVAL;
2003267a4c76SHugh Dickins 		goto unlock;
2004ff36b801SShaohua Li 	}
200563ec1973SMatthew Wilcox (Oracle) out:
2006800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
200754af6042SHugh Dickins 	return 0;
2008d00806b1SNick Piggin 
2009d0217ac0SNick Piggin 	/*
201054af6042SHugh Dickins 	 * Error recovery.
20111da177e4SLinus Torvalds 	 */
201254af6042SHugh Dickins unacct:
2013d8c6546bSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, compound_nr(page));
2014800d8c63SKirill A. Shutemov 
2015800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
2016800d8c63SKirill A. Shutemov 		unlock_page(page);
2017800d8c63SKirill A. Shutemov 		put_page(page);
2018800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2019800d8c63SKirill A. Shutemov 	}
2020d1899228SHugh Dickins unlock:
202127ab7006SHugh Dickins 	if (page) {
202254af6042SHugh Dickins 		unlock_page(page);
202309cbfeafSKirill A. Shutemov 		put_page(page);
202454af6042SHugh Dickins 	}
202554af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20264595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
202754af6042SHugh Dickins 		shmem_recalc_inode(inode);
20284595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20291da177e4SLinus Torvalds 		goto repeat;
2030d8dc74f2SAdrian Bunk 	}
20317f4446eeSMatthew Wilcox 	if (error == -EEXIST)
203254af6042SHugh Dickins 		goto repeat;
203354af6042SHugh Dickins 	return error;
20341da177e4SLinus Torvalds }
20351da177e4SLinus Torvalds 
203610d20bd2SLinus Torvalds /*
203710d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
203810d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
203910d20bd2SLinus Torvalds  * target.
204010d20bd2SLinus Torvalds  */
2041ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
204210d20bd2SLinus Torvalds {
204310d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20442055da97SIngo Molnar 	list_del_init(&wait->entry);
204510d20bd2SLinus Torvalds 	return ret;
204610d20bd2SLinus Torvalds }
204710d20bd2SLinus Torvalds 
204820acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20491da177e4SLinus Torvalds {
205011bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2051496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20529e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
205320acce67SSouptick Joarder 	int err;
205420acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20551da177e4SLinus Torvalds 
2056f00cdc6dSHugh Dickins 	/*
2057f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2058f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
20599608703eSJan Kara 	 * locks writers out with its hold on i_rwsem.  So refrain from
20608e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20618e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20628e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20638e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20648e205f77SHugh Dickins 	 *
20658e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20668e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20678e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20688e205f77SHugh Dickins 	 *
20698e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20709608703eSJan Kara 	 * standard mutex or completion: but we cannot take i_rwsem in fault,
20718e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2072f00cdc6dSHugh Dickins 	 */
2073f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2074f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2075f00cdc6dSHugh Dickins 
2076f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2077f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20788e205f77SHugh Dickins 		if (shmem_falloc &&
20798e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20808e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20818e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20828897c1b1SKirill A. Shutemov 			struct file *fpin;
20838e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
208410d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20858e205f77SHugh Dickins 
20868e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20878897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20888897c1b1SKirill A. Shutemov 			if (fpin)
20898e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20908e205f77SHugh Dickins 
20918e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20928e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20938e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20948e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20958e205f77SHugh Dickins 			schedule();
20968e205f77SHugh Dickins 
20978e205f77SHugh Dickins 			/*
20988e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20998e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
21008e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
21018e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
21028e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
21038e205f77SHugh Dickins 			 */
21048e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
21058e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
21068e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21078897c1b1SKirill A. Shutemov 
21088897c1b1SKirill A. Shutemov 			if (fpin)
21098897c1b1SKirill A. Shutemov 				fput(fpin);
21108e205f77SHugh Dickins 			return ret;
2111f00cdc6dSHugh Dickins 		}
21128e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2113f00cdc6dSHugh Dickins 	}
2114f00cdc6dSHugh Dickins 
21155e6e5a12SHugh Dickins 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
2116cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
211720acce67SSouptick Joarder 	if (err)
211820acce67SSouptick Joarder 		return vmf_error(err);
211968da9f05SHugh Dickins 	return ret;
21201da177e4SLinus Torvalds }
21211da177e4SLinus Torvalds 
2122c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2123c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2124c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2125c01d5b30SHugh Dickins {
2126c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2127c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2128c01d5b30SHugh Dickins 	unsigned long addr;
2129c01d5b30SHugh Dickins 	unsigned long offset;
2130c01d5b30SHugh Dickins 	unsigned long inflated_len;
2131c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2132c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2133c01d5b30SHugh Dickins 
2134c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2135c01d5b30SHugh Dickins 		return -ENOMEM;
2136c01d5b30SHugh Dickins 
2137c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2138c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2139c01d5b30SHugh Dickins 
2140396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2141c01d5b30SHugh Dickins 		return addr;
2142c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2143c01d5b30SHugh Dickins 		return addr;
2144c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2145c01d5b30SHugh Dickins 		return addr;
2146c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2147c01d5b30SHugh Dickins 		return addr;
2148c01d5b30SHugh Dickins 
2149c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2150c01d5b30SHugh Dickins 		return addr;
2151c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2152c01d5b30SHugh Dickins 		return addr;
2153c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2154c01d5b30SHugh Dickins 		return addr;
2155c01d5b30SHugh Dickins 	/*
2156c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2157c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
215899158997SKirill A. Shutemov 	 * But if caller specified an address hint and we allocated area there
215999158997SKirill A. Shutemov 	 * successfully, respect that as before.
2160c01d5b30SHugh Dickins 	 */
216199158997SKirill A. Shutemov 	if (uaddr == addr)
2162c01d5b30SHugh Dickins 		return addr;
2163c01d5b30SHugh Dickins 
2164c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2165c01d5b30SHugh Dickins 		struct super_block *sb;
2166c01d5b30SHugh Dickins 
2167c01d5b30SHugh Dickins 		if (file) {
2168c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2169c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2170c01d5b30SHugh Dickins 		} else {
2171c01d5b30SHugh Dickins 			/*
2172c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2173c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2174c01d5b30SHugh Dickins 			 */
2175c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2176c01d5b30SHugh Dickins 				return addr;
2177c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2178c01d5b30SHugh Dickins 		}
21793089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2180c01d5b30SHugh Dickins 			return addr;
2181c01d5b30SHugh Dickins 	}
2182c01d5b30SHugh Dickins 
2183c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2184c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2185c01d5b30SHugh Dickins 		return addr;
2186c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2187c01d5b30SHugh Dickins 		return addr;
2188c01d5b30SHugh Dickins 
2189c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2190c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2191c01d5b30SHugh Dickins 		return addr;
2192c01d5b30SHugh Dickins 	if (inflated_len < len)
2193c01d5b30SHugh Dickins 		return addr;
2194c01d5b30SHugh Dickins 
219599158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2196c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2197c01d5b30SHugh Dickins 		return addr;
2198c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2199c01d5b30SHugh Dickins 		return addr;
2200c01d5b30SHugh Dickins 
2201c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2202c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2203c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2204c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2205c01d5b30SHugh Dickins 
2206c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2207c01d5b30SHugh Dickins 		return addr;
2208c01d5b30SHugh Dickins 	return inflated_addr;
2209c01d5b30SHugh Dickins }
2210c01d5b30SHugh Dickins 
22111da177e4SLinus Torvalds #ifdef CONFIG_NUMA
221241ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22131da177e4SLinus Torvalds {
2214496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
221541ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
2218d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2219d8dc74f2SAdrian Bunk 					  unsigned long addr)
22201da177e4SLinus Torvalds {
2221496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
222241ffe5d5SHugh Dickins 	pgoff_t index;
22231da177e4SLinus Torvalds 
222441ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
222541ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22261da177e4SLinus Torvalds }
22271da177e4SLinus Torvalds #endif
22281da177e4SLinus Torvalds 
2229d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
22301da177e4SLinus Torvalds {
2231496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22321da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22331da177e4SLinus Torvalds 	int retval = -ENOMEM;
22341da177e4SLinus Torvalds 
2235ea0dfeb4SHugh Dickins 	/*
2236ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2237ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2238ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2239ea0dfeb4SHugh Dickins 	 */
22401da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
2241d7c9e99aSAlexey Gladkov 		if (!user_shm_lock(inode->i_size, ucounts))
22421da177e4SLinus Torvalds 			goto out_nomem;
22431da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
224489e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22451da177e4SLinus Torvalds 	}
2246d7c9e99aSAlexey Gladkov 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2247d7c9e99aSAlexey Gladkov 		user_shm_unlock(inode->i_size, ucounts);
22481da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
224989e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22501da177e4SLinus Torvalds 	}
22511da177e4SLinus Torvalds 	retval = 0;
225289e004eaSLee Schermerhorn 
22531da177e4SLinus Torvalds out_nomem:
22541da177e4SLinus Torvalds 	return retval;
22551da177e4SLinus Torvalds }
22561da177e4SLinus Torvalds 
22579b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22581da177e4SLinus Torvalds {
2259ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
226022247efdSPeter Xu 	int ret;
2261ab3948f5SJoel Fernandes (Google) 
226222247efdSPeter Xu 	ret = seal_check_future_write(info->seals, vma);
226322247efdSPeter Xu 	if (ret)
226422247efdSPeter Xu 		return ret;
2265ab3948f5SJoel Fernandes (Google) 
226651b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
226751b0bff2SCatalin Marinas 	vma->vm_flags |= VM_MTE_ALLOWED;
226851b0bff2SCatalin Marinas 
22691da177e4SLinus Torvalds 	file_accessed(file);
22701da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2271396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2272f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2273f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2274f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2275f3f0e1d2SKirill A. Shutemov 	}
22761da177e4SLinus Torvalds 	return 0;
22771da177e4SLinus Torvalds }
22781da177e4SLinus Torvalds 
2279454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
228009208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22811da177e4SLinus Torvalds {
22821da177e4SLinus Torvalds 	struct inode *inode;
22831da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22841da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2285e809d5f0SChris Down 	ino_t ino;
22861da177e4SLinus Torvalds 
2287e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
22881da177e4SLinus Torvalds 		return NULL;
22891da177e4SLinus Torvalds 
22901da177e4SLinus Torvalds 	inode = new_inode(sb);
22911da177e4SLinus Torvalds 	if (inode) {
2292e809d5f0SChris Down 		inode->i_ino = ino;
229321cb47beSChristian Brauner 		inode_init_owner(&init_user_ns, inode, dir, mode);
22941da177e4SLinus Torvalds 		inode->i_blocks = 0;
2295078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
229646c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
22971da177e4SLinus Torvalds 		info = SHMEM_I(inode);
22981da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
22991da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2300af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
230140e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23020b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2303779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23041da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
230538f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
230672c04902SAl Viro 		cache_no_acl(inode);
2307ff36da69SMatthew Wilcox (Oracle) 		mapping_set_large_folios(inode->i_mapping);
23081da177e4SLinus Torvalds 
23091da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23101da177e4SLinus Torvalds 		default:
231139f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23121da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23131da177e4SLinus Torvalds 			break;
23141da177e4SLinus Torvalds 		case S_IFREG:
231514fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23161da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23171da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
231871fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
231971fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23201da177e4SLinus Torvalds 			break;
23211da177e4SLinus Torvalds 		case S_IFDIR:
2322d8c76e6fSDave Hansen 			inc_nlink(inode);
23231da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23241da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23251da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23261da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23271da177e4SLinus Torvalds 			break;
23281da177e4SLinus Torvalds 		case S_IFLNK:
23291da177e4SLinus Torvalds 			/*
23301da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
23311da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
23321da177e4SLinus Torvalds 			 */
233371fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23341da177e4SLinus Torvalds 			break;
23351da177e4SLinus Torvalds 		}
2336b45d71fbSJoel Fernandes (Google) 
2337b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23385b04c689SPavel Emelyanov 	} else
23395b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23401da177e4SLinus Torvalds 	return inode;
23411da177e4SLinus Torvalds }
23421da177e4SLinus Torvalds 
23433460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
23443460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23454c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
23464c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
23474c27fe4cSMike Rapoport 			   unsigned long dst_addr,
23484c27fe4cSMike Rapoport 			   unsigned long src_addr,
23498d103963SMike Rapoport 			   bool zeropage,
23504c27fe4cSMike Rapoport 			   struct page **pagep)
23514c27fe4cSMike Rapoport {
23524c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23534c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23544c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23554c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23564c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23574c27fe4cSMike Rapoport 	void *page_kaddr;
23584c27fe4cSMike Rapoport 	struct page *page;
23594c27fe4cSMike Rapoport 	int ret;
23603460f6e5SAxel Rasmussen 	pgoff_t max_off;
23614c27fe4cSMike Rapoport 
23627ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
23637ed9d238SAxel Rasmussen 		/*
23647ed9d238SAxel Rasmussen 		 * We may have got a page, returned -ENOENT triggering a retry,
23657ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the page, to
23667ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
23677ed9d238SAxel Rasmussen 		 */
23687ed9d238SAxel Rasmussen 		if (unlikely(*pagep)) {
23697ed9d238SAxel Rasmussen 			put_page(*pagep);
23707ed9d238SAxel Rasmussen 			*pagep = NULL;
23717ed9d238SAxel Rasmussen 		}
23727d64ae3aSAxel Rasmussen 		return -ENOMEM;
23737ed9d238SAxel Rasmussen 	}
23744c27fe4cSMike Rapoport 
2375cb658a45SAndrea Arcangeli 	if (!*pagep) {
23767d64ae3aSAxel Rasmussen 		ret = -ENOMEM;
23774c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
23784c27fe4cSMike Rapoport 		if (!page)
23790f079694SMike Rapoport 			goto out_unacct_blocks;
23804c27fe4cSMike Rapoport 
23813460f6e5SAxel Rasmussen 		if (!zeropage) {	/* COPY */
23824c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23838d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23848d103963SMike Rapoport 					     (const void __user *)src_addr,
23854c27fe4cSMike Rapoport 					     PAGE_SIZE);
23864c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23874c27fe4cSMike Rapoport 
2388c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
23894c27fe4cSMike Rapoport 			if (unlikely(ret)) {
23904c27fe4cSMike Rapoport 				*pagep = page;
23917d64ae3aSAxel Rasmussen 				ret = -ENOENT;
23924c27fe4cSMike Rapoport 				/* don't free the page */
23937d64ae3aSAxel Rasmussen 				goto out_unacct_blocks;
23944c27fe4cSMike Rapoport 			}
23953460f6e5SAxel Rasmussen 		} else {		/* ZEROPAGE */
23968d103963SMike Rapoport 			clear_highpage(page);
23978d103963SMike Rapoport 		}
23984c27fe4cSMike Rapoport 	} else {
23994c27fe4cSMike Rapoport 		page = *pagep;
24004c27fe4cSMike Rapoport 		*pagep = NULL;
24014c27fe4cSMike Rapoport 	}
24024c27fe4cSMike Rapoport 
24033460f6e5SAxel Rasmussen 	VM_BUG_ON(PageLocked(page));
24043460f6e5SAxel Rasmussen 	VM_BUG_ON(PageSwapBacked(page));
24059cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
24069cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2407a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
24089cc90c66SAndrea Arcangeli 
2409e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2410e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
24113460f6e5SAxel Rasmussen 	if (unlikely(pgoff >= max_off))
2412e2a50c1fSAndrea Arcangeli 		goto out_release;
2413e2a50c1fSAndrea Arcangeli 
24143fea5a49SJohannes Weiner 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
24153fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24164c27fe4cSMike Rapoport 	if (ret)
24174c27fe4cSMike Rapoport 		goto out_release;
24184c27fe4cSMike Rapoport 
24197d64ae3aSAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
24207d64ae3aSAxel Rasmussen 				       page, true, false);
24217d64ae3aSAxel Rasmussen 	if (ret)
24227d64ae3aSAxel Rasmussen 		goto out_delete_from_cache;
24234c27fe4cSMike Rapoport 
242494b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24254c27fe4cSMike Rapoport 	info->alloced++;
24264c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24274c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
242894b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24294c27fe4cSMike Rapoport 
2430e2a50c1fSAndrea Arcangeli 	unlock_page(page);
24317d64ae3aSAxel Rasmussen 	return 0;
24327d64ae3aSAxel Rasmussen out_delete_from_cache:
2433e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
24344c27fe4cSMike Rapoport out_release:
24359cc90c66SAndrea Arcangeli 	unlock_page(page);
24364c27fe4cSMike Rapoport 	put_page(page);
24374c27fe4cSMike Rapoport out_unacct_blocks:
24380f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24397d64ae3aSAxel Rasmussen 	return ret;
24404c27fe4cSMike Rapoport }
24413460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
24428d103963SMike Rapoport 
24431da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
244492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
244569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24461da177e4SLinus Torvalds 
24476d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
24486d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
24496d9d88d0SJarkko Sakkinen #else
24506d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
24516d9d88d0SJarkko Sakkinen #endif
24526d9d88d0SJarkko Sakkinen 
24531da177e4SLinus Torvalds static int
2454800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2455800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2456800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24571da177e4SLinus Torvalds {
2458800d15a5SNick Piggin 	struct inode *inode = mapping->host;
245940e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
246009cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
246140e041a2SDavid Herrmann 
24629608703eSJan Kara 	/* i_rwsem is held by caller */
2463ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2464ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2465ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
246640e041a2SDavid Herrmann 			return -EPERM;
246740e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
246840e041a2SDavid Herrmann 			return -EPERM;
246940e041a2SDavid Herrmann 	}
247040e041a2SDavid Herrmann 
2471d0b51bfbSLinus Torvalds 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2472800d15a5SNick Piggin }
2473800d15a5SNick Piggin 
2474800d15a5SNick Piggin static int
2475800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2476800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2477800d15a5SNick Piggin 			struct page *page, void *fsdata)
2478800d15a5SNick Piggin {
2479800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2480800d15a5SNick Piggin 
2481800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2482800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2483800d15a5SNick Piggin 
2484ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2485800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2486800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2487800d8c63SKirill A. Shutemov 			int i;
2488800d8c63SKirill A. Shutemov 
2489800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2490800d8c63SKirill A. Shutemov 				if (head + i == page)
2491800d8c63SKirill A. Shutemov 					continue;
2492800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2493800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2494800d8c63SKirill A. Shutemov 			}
2495800d8c63SKirill A. Shutemov 		}
249609cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
249709cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2498ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
249909cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2500ec9516fbSHugh Dickins 		}
2501800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2502ec9516fbSHugh Dickins 	}
2503d3602444SHugh Dickins 	set_page_dirty(page);
25046746aff7SWu Fengguang 	unlock_page(page);
250509cbfeafSKirill A. Shutemov 	put_page(page);
2506d3602444SHugh Dickins 
2507800d15a5SNick Piggin 	return copied;
25081da177e4SLinus Torvalds }
25091da177e4SLinus Torvalds 
25102ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25111da177e4SLinus Torvalds {
25126e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25136e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25141da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
251541ffe5d5SHugh Dickins 	pgoff_t index;
251641ffe5d5SHugh Dickins 	unsigned long offset;
2517a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2518f7c1d074SGeert Uytterhoeven 	int error = 0;
2519cb66a7a1SAl Viro 	ssize_t retval = 0;
25206e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2521a0ee5ec5SHugh Dickins 
2522a0ee5ec5SHugh Dickins 	/*
2523a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2524a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2525a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2526a0ee5ec5SHugh Dickins 	 */
2527777eda2cSAl Viro 	if (!iter_is_iovec(to))
252875edd345SHugh Dickins 		sgp = SGP_CACHE;
25291da177e4SLinus Torvalds 
253009cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
253109cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds 	for (;;) {
25341da177e4SLinus Torvalds 		struct page *page = NULL;
253541ffe5d5SHugh Dickins 		pgoff_t end_index;
253641ffe5d5SHugh Dickins 		unsigned long nr, ret;
25371da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25381da177e4SLinus Torvalds 
253909cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25401da177e4SLinus Torvalds 		if (index > end_index)
25411da177e4SLinus Torvalds 			break;
25421da177e4SLinus Torvalds 		if (index == end_index) {
254309cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25441da177e4SLinus Torvalds 			if (nr <= offset)
25451da177e4SLinus Torvalds 				break;
25461da177e4SLinus Torvalds 		}
25471da177e4SLinus Torvalds 
25489e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
25496e58e79dSAl Viro 		if (error) {
25506e58e79dSAl Viro 			if (error == -EINVAL)
25516e58e79dSAl Viro 				error = 0;
25521da177e4SLinus Torvalds 			break;
25531da177e4SLinus Torvalds 		}
255475edd345SHugh Dickins 		if (page) {
255575edd345SHugh Dickins 			if (sgp == SGP_CACHE)
255675edd345SHugh Dickins 				set_page_dirty(page);
2557d3602444SHugh Dickins 			unlock_page(page);
255875edd345SHugh Dickins 		}
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds 		/*
25611da177e4SLinus Torvalds 		 * We must evaluate after, since reads (unlike writes)
25629608703eSJan Kara 		 * are called without i_rwsem protection against truncate
25631da177e4SLinus Torvalds 		 */
256409cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
25651da177e4SLinus Torvalds 		i_size = i_size_read(inode);
256609cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25671da177e4SLinus Torvalds 		if (index == end_index) {
256809cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25691da177e4SLinus Torvalds 			if (nr <= offset) {
25701da177e4SLinus Torvalds 				if (page)
257109cbfeafSKirill A. Shutemov 					put_page(page);
25721da177e4SLinus Torvalds 				break;
25731da177e4SLinus Torvalds 			}
25741da177e4SLinus Torvalds 		}
25751da177e4SLinus Torvalds 		nr -= offset;
25761da177e4SLinus Torvalds 
25771da177e4SLinus Torvalds 		if (page) {
25781da177e4SLinus Torvalds 			/*
25791da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
25801da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
25811da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
25821da177e4SLinus Torvalds 			 */
25831da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
25841da177e4SLinus Torvalds 				flush_dcache_page(page);
25851da177e4SLinus Torvalds 			/*
25861da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
25871da177e4SLinus Torvalds 			 */
25881da177e4SLinus Torvalds 			if (!offset)
25891da177e4SLinus Torvalds 				mark_page_accessed(page);
2590b5810039SNick Piggin 		} else {
25911da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
259209cbfeafSKirill A. Shutemov 			get_page(page);
2593b5810039SNick Piggin 		}
25941da177e4SLinus Torvalds 
25951da177e4SLinus Torvalds 		/*
25961da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
25971da177e4SLinus Torvalds 		 * now we can copy it to user space...
25981da177e4SLinus Torvalds 		 */
25992ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
26006e58e79dSAl Viro 		retval += ret;
26011da177e4SLinus Torvalds 		offset += ret;
260209cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
260309cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26041da177e4SLinus Torvalds 
260509cbfeafSKirill A. Shutemov 		put_page(page);
26062ba5bbedSAl Viro 		if (!iov_iter_count(to))
26071da177e4SLinus Torvalds 			break;
26086e58e79dSAl Viro 		if (ret < nr) {
26096e58e79dSAl Viro 			error = -EFAULT;
26106e58e79dSAl Viro 			break;
26116e58e79dSAl Viro 		}
26121da177e4SLinus Torvalds 		cond_resched();
26131da177e4SLinus Torvalds 	}
26141da177e4SLinus Torvalds 
261509cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26166e58e79dSAl Viro 	file_accessed(file);
26176e58e79dSAl Viro 	return retval ? retval : error;
26181da177e4SLinus Torvalds }
26191da177e4SLinus Torvalds 
2620965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2621220f2ac9SHugh Dickins {
2622220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2623220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2624220f2ac9SHugh Dickins 
2625965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2626965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2627220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
262841139aa4SMatthew Wilcox (Oracle) 	if (offset < 0)
262941139aa4SMatthew Wilcox (Oracle) 		return -ENXIO;
263041139aa4SMatthew Wilcox (Oracle) 
26315955102cSAl Viro 	inode_lock(inode);
26329608703eSJan Kara 	/* We're holding i_rwsem so we can access i_size directly */
263341139aa4SMatthew Wilcox (Oracle) 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2634387aae6fSHugh Dickins 	if (offset >= 0)
263546a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26365955102cSAl Viro 	inode_unlock(inode);
2637220f2ac9SHugh Dickins 	return offset;
2638220f2ac9SHugh Dickins }
2639220f2ac9SHugh Dickins 
264083e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
264183e4fa9cSHugh Dickins 							 loff_t len)
264283e4fa9cSHugh Dickins {
2643496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2644e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
264540e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26461aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2647d144bf62SHugh Dickins 	pgoff_t start, index, end, undo_fallocend;
2648e2d12e22SHugh Dickins 	int error;
264983e4fa9cSHugh Dickins 
265013ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
265113ace4d0SHugh Dickins 		return -EOPNOTSUPP;
265213ace4d0SHugh Dickins 
26535955102cSAl Viro 	inode_lock(inode);
265483e4fa9cSHugh Dickins 
265583e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
265683e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
265783e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
265883e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
26598e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
266083e4fa9cSHugh Dickins 
26619608703eSJan Kara 		/* protected by i_rwsem */
2662ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
266340e041a2SDavid Herrmann 			error = -EPERM;
266440e041a2SDavid Herrmann 			goto out;
266540e041a2SDavid Herrmann 		}
266640e041a2SDavid Herrmann 
26678e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2668aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2669f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2670f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2671f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2672f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2673f00cdc6dSHugh Dickins 
267483e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
267583e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
267683e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
267783e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
267883e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
26798e205f77SHugh Dickins 
26808e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
26818e205f77SHugh Dickins 		inode->i_private = NULL;
26828e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
26832055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
26848e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
268583e4fa9cSHugh Dickins 		error = 0;
26868e205f77SHugh Dickins 		goto out;
268783e4fa9cSHugh Dickins 	}
268883e4fa9cSHugh Dickins 
2689e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2690e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2691e2d12e22SHugh Dickins 	if (error)
2692e2d12e22SHugh Dickins 		goto out;
2693e2d12e22SHugh Dickins 
269440e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
269540e041a2SDavid Herrmann 		error = -EPERM;
269640e041a2SDavid Herrmann 		goto out;
269740e041a2SDavid Herrmann 	}
269840e041a2SDavid Herrmann 
269909cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
270009cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2701e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2702e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2703e2d12e22SHugh Dickins 		error = -ENOSPC;
2704e2d12e22SHugh Dickins 		goto out;
2705e2d12e22SHugh Dickins 	}
2706e2d12e22SHugh Dickins 
27078e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27081aac1400SHugh Dickins 	shmem_falloc.start = start;
27091aac1400SHugh Dickins 	shmem_falloc.next  = start;
27101aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27111aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27121aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27131aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27141aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27151aac1400SHugh Dickins 
2716d144bf62SHugh Dickins 	/*
2717d144bf62SHugh Dickins 	 * info->fallocend is only relevant when huge pages might be
2718d144bf62SHugh Dickins 	 * involved: to prevent split_huge_page() freeing fallocated
2719d144bf62SHugh Dickins 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
2720d144bf62SHugh Dickins 	 */
2721d144bf62SHugh Dickins 	undo_fallocend = info->fallocend;
2722d144bf62SHugh Dickins 	if (info->fallocend < end)
2723d144bf62SHugh Dickins 		info->fallocend = end;
2724d144bf62SHugh Dickins 
2725050dcb5cSHugh Dickins 	for (index = start; index < end; ) {
2726e2d12e22SHugh Dickins 		struct page *page;
2727e2d12e22SHugh Dickins 
2728e2d12e22SHugh Dickins 		/*
2729e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2730e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2731e2d12e22SHugh Dickins 		 */
2732e2d12e22SHugh Dickins 		if (signal_pending(current))
2733e2d12e22SHugh Dickins 			error = -EINTR;
27341aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27351aac1400SHugh Dickins 			error = -ENOMEM;
2736e2d12e22SHugh Dickins 		else
27379e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2738e2d12e22SHugh Dickins 		if (error) {
2739d144bf62SHugh Dickins 			info->fallocend = undo_fallocend;
27401635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
27417f556567SHugh Dickins 			if (index > start) {
27421635f6a7SHugh Dickins 				shmem_undo_range(inode,
274309cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2744b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27457f556567SHugh Dickins 			}
27461aac1400SHugh Dickins 			goto undone;
2747e2d12e22SHugh Dickins 		}
2748e2d12e22SHugh Dickins 
2749050dcb5cSHugh Dickins 		index++;
2750050dcb5cSHugh Dickins 		/*
2751050dcb5cSHugh Dickins 		 * Here is a more important optimization than it appears:
2752050dcb5cSHugh Dickins 		 * a second SGP_FALLOC on the same huge page will clear it,
2753050dcb5cSHugh Dickins 		 * making it PageUptodate and un-undoable if we fail later.
2754050dcb5cSHugh Dickins 		 */
2755050dcb5cSHugh Dickins 		if (PageTransCompound(page)) {
2756050dcb5cSHugh Dickins 			index = round_up(index, HPAGE_PMD_NR);
2757050dcb5cSHugh Dickins 			/* Beware 32-bit wraparound */
2758050dcb5cSHugh Dickins 			if (!index)
2759050dcb5cSHugh Dickins 				index--;
2760050dcb5cSHugh Dickins 		}
2761050dcb5cSHugh Dickins 
2762e2d12e22SHugh Dickins 		/*
27631aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
27641aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
27651aac1400SHugh Dickins 		 */
27661aac1400SHugh Dickins 		if (!PageUptodate(page))
2767050dcb5cSHugh Dickins 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
2768050dcb5cSHugh Dickins 		shmem_falloc.next = index;
27691aac1400SHugh Dickins 
27701aac1400SHugh Dickins 		/*
27711635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
27721635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
27731635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2774e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2775e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2776e2d12e22SHugh Dickins 		 */
2777e2d12e22SHugh Dickins 		set_page_dirty(page);
2778e2d12e22SHugh Dickins 		unlock_page(page);
277909cbfeafSKirill A. Shutemov 		put_page(page);
2780e2d12e22SHugh Dickins 		cond_resched();
2781e2d12e22SHugh Dickins 	}
2782e2d12e22SHugh Dickins 
2783e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2784e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2785078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
27861aac1400SHugh Dickins undone:
27871aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27881aac1400SHugh Dickins 	inode->i_private = NULL;
27891aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2790e2d12e22SHugh Dickins out:
27915955102cSAl Viro 	inode_unlock(inode);
279283e4fa9cSHugh Dickins 	return error;
279383e4fa9cSHugh Dickins }
279483e4fa9cSHugh Dickins 
2795726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
27961da177e4SLinus Torvalds {
2797726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
27981da177e4SLinus Torvalds 
27991da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
280009cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28011da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28020edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28031da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
280441ffe5d5SHugh Dickins 		buf->f_bavail =
280541ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
280641ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
28070edd73b3SHugh Dickins 	}
28080edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
28091da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
28101da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28111da177e4SLinus Torvalds 	}
28121da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
281359cda49eSAmir Goldstein 
281459cda49eSAmir Goldstein 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
281559cda49eSAmir Goldstein 
28161da177e4SLinus Torvalds 	return 0;
28171da177e4SLinus Torvalds }
28181da177e4SLinus Torvalds 
28191da177e4SLinus Torvalds /*
28201da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
28211da177e4SLinus Torvalds  */
28221da177e4SLinus Torvalds static int
2823549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2824549c7297SChristian Brauner 	    struct dentry *dentry, umode_t mode, dev_t dev)
28251da177e4SLinus Torvalds {
28260b0a0806SHugh Dickins 	struct inode *inode;
28271da177e4SLinus Torvalds 	int error = -ENOSPC;
28281da177e4SLinus Torvalds 
2829454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
28301da177e4SLinus Torvalds 	if (inode) {
2831feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2832feda821eSChristoph Hellwig 		if (error)
2833feda821eSChristoph Hellwig 			goto out_iput;
28342a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28359d8f13baSMimi Zohar 						     &dentry->d_name,
28366d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2837feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2838feda821eSChristoph Hellwig 			goto out_iput;
283937ec43cdSMimi Zohar 
2840718deb6bSAl Viro 		error = 0;
28411da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2842078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28431da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28441da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28451da177e4SLinus Torvalds 	}
28461da177e4SLinus Torvalds 	return error;
2847feda821eSChristoph Hellwig out_iput:
2848feda821eSChristoph Hellwig 	iput(inode);
2849feda821eSChristoph Hellwig 	return error;
28501da177e4SLinus Torvalds }
28511da177e4SLinus Torvalds 
285260545d0dSAl Viro static int
2853549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2854549c7297SChristian Brauner 	      struct dentry *dentry, umode_t mode)
285560545d0dSAl Viro {
285660545d0dSAl Viro 	struct inode *inode;
285760545d0dSAl Viro 	int error = -ENOSPC;
285860545d0dSAl Viro 
285960545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
286060545d0dSAl Viro 	if (inode) {
286160545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
286260545d0dSAl Viro 						     NULL,
286360545d0dSAl Viro 						     shmem_initxattrs, NULL);
2864feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2865feda821eSChristoph Hellwig 			goto out_iput;
2866feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2867feda821eSChristoph Hellwig 		if (error)
2868feda821eSChristoph Hellwig 			goto out_iput;
286960545d0dSAl Viro 		d_tmpfile(dentry, inode);
287060545d0dSAl Viro 	}
287160545d0dSAl Viro 	return error;
2872feda821eSChristoph Hellwig out_iput:
2873feda821eSChristoph Hellwig 	iput(inode);
2874feda821eSChristoph Hellwig 	return error;
287560545d0dSAl Viro }
287660545d0dSAl Viro 
2877549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2878549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
28791da177e4SLinus Torvalds {
28801da177e4SLinus Torvalds 	int error;
28811da177e4SLinus Torvalds 
2882549c7297SChristian Brauner 	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2883549c7297SChristian Brauner 				 mode | S_IFDIR, 0)))
28841da177e4SLinus Torvalds 		return error;
2885d8c76e6fSDave Hansen 	inc_nlink(dir);
28861da177e4SLinus Torvalds 	return 0;
28871da177e4SLinus Torvalds }
28881da177e4SLinus Torvalds 
2889549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2890549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
28911da177e4SLinus Torvalds {
2892549c7297SChristian Brauner 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
28931da177e4SLinus Torvalds }
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds /*
28961da177e4SLinus Torvalds  * Link a file..
28971da177e4SLinus Torvalds  */
28981da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
28991da177e4SLinus Torvalds {
290075c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
290129b00e60SDarrick J. Wong 	int ret = 0;
29021da177e4SLinus Torvalds 
29031da177e4SLinus Torvalds 	/*
29041da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
29051da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29061da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29071062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29081062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29091da177e4SLinus Torvalds 	 */
29101062af92SDarrick J. Wong 	if (inode->i_nlink) {
2911e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
29125b04c689SPavel Emelyanov 		if (ret)
29135b04c689SPavel Emelyanov 			goto out;
29141062af92SDarrick J. Wong 	}
29151da177e4SLinus Torvalds 
29161da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2917078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2918d8c76e6fSDave Hansen 	inc_nlink(inode);
29197de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29201da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29211da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29225b04c689SPavel Emelyanov out:
29235b04c689SPavel Emelyanov 	return ret;
29241da177e4SLinus Torvalds }
29251da177e4SLinus Torvalds 
29261da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
29271da177e4SLinus Torvalds {
292875c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
29291da177e4SLinus Torvalds 
29305b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
29315b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
29321da177e4SLinus Torvalds 
29331da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2934078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
29359a53c3a7SDave Hansen 	drop_nlink(inode);
29361da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
29371da177e4SLinus Torvalds 	return 0;
29381da177e4SLinus Torvalds }
29391da177e4SLinus Torvalds 
29401da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
29411da177e4SLinus Torvalds {
29421da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29431da177e4SLinus Torvalds 		return -ENOTEMPTY;
29441da177e4SLinus Torvalds 
294575c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29469a53c3a7SDave Hansen 	drop_nlink(dir);
29471da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29481da177e4SLinus Torvalds }
29491da177e4SLinus Torvalds 
2950549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns,
2951549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
295246fdb794SMiklos Szeredi {
295346fdb794SMiklos Szeredi 	struct dentry *whiteout;
295446fdb794SMiklos Szeredi 	int error;
295546fdb794SMiklos Szeredi 
295646fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
295746fdb794SMiklos Szeredi 	if (!whiteout)
295846fdb794SMiklos Szeredi 		return -ENOMEM;
295946fdb794SMiklos Szeredi 
2960549c7297SChristian Brauner 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
296146fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
296246fdb794SMiklos Szeredi 	dput(whiteout);
296346fdb794SMiklos Szeredi 	if (error)
296446fdb794SMiklos Szeredi 		return error;
296546fdb794SMiklos Szeredi 
296646fdb794SMiklos Szeredi 	/*
296746fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
296846fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
296946fdb794SMiklos Szeredi 	 *
297046fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
297146fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
297246fdb794SMiklos Szeredi 	 */
297346fdb794SMiklos Szeredi 	d_rehash(whiteout);
297446fdb794SMiklos Szeredi 	return 0;
297546fdb794SMiklos Szeredi }
297646fdb794SMiklos Szeredi 
29771da177e4SLinus Torvalds /*
29781da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
29791da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
29801da177e4SLinus Torvalds  * it exists so that the VFS layer correctly free's it when it
29811da177e4SLinus Torvalds  * gets overwritten.
29821da177e4SLinus Torvalds  */
2983549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns,
2984549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
2985549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
2986549c7297SChristian Brauner 			 unsigned int flags)
29871da177e4SLinus Torvalds {
298875c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
29891da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
29901da177e4SLinus Torvalds 
299146fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
29923b69ff51SMiklos Szeredi 		return -EINVAL;
29933b69ff51SMiklos Szeredi 
299437456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
29956429e463SLorenz Bauer 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
299637456771SMiklos Szeredi 
29971da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
29981da177e4SLinus Torvalds 		return -ENOTEMPTY;
29991da177e4SLinus Torvalds 
300046fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
300146fdb794SMiklos Szeredi 		int error;
300246fdb794SMiklos Szeredi 
3003549c7297SChristian Brauner 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
300446fdb794SMiklos Szeredi 		if (error)
300546fdb794SMiklos Szeredi 			return error;
300646fdb794SMiklos Szeredi 	}
300746fdb794SMiklos Szeredi 
300875c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
30091da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3010b928095bSMiklos Szeredi 		if (they_are_dirs) {
301175c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
30129a53c3a7SDave Hansen 			drop_nlink(old_dir);
3013b928095bSMiklos Szeredi 		}
30141da177e4SLinus Torvalds 	} else if (they_are_dirs) {
30159a53c3a7SDave Hansen 		drop_nlink(old_dir);
3016d8c76e6fSDave Hansen 		inc_nlink(new_dir);
30171da177e4SLinus Torvalds 	}
30181da177e4SLinus Torvalds 
30191da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
30201da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
30211da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
30221da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3023078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30241da177e4SLinus Torvalds 	return 0;
30251da177e4SLinus Torvalds }
30261da177e4SLinus Torvalds 
3027549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3028549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
30291da177e4SLinus Torvalds {
30301da177e4SLinus Torvalds 	int error;
30311da177e4SLinus Torvalds 	int len;
30321da177e4SLinus Torvalds 	struct inode *inode;
30339276aad6SHugh Dickins 	struct page *page;
30341da177e4SLinus Torvalds 
30351da177e4SLinus Torvalds 	len = strlen(symname) + 1;
303609cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30371da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30381da177e4SLinus Torvalds 
30390825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30400825a6f9SJoe Perches 				VM_NORESERVE);
30411da177e4SLinus Torvalds 	if (!inode)
30421da177e4SLinus Torvalds 		return -ENOSPC;
30431da177e4SLinus Torvalds 
30449d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30456d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3046343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3047570bc1c2SStephen Smalley 		iput(inode);
3048570bc1c2SStephen Smalley 		return error;
3049570bc1c2SStephen Smalley 	}
3050570bc1c2SStephen Smalley 
30511da177e4SLinus Torvalds 	inode->i_size = len-1;
305269f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
30533ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
30543ed47db3SAl Viro 		if (!inode->i_link) {
305569f07ec9SHugh Dickins 			iput(inode);
305669f07ec9SHugh Dickins 			return -ENOMEM;
305769f07ec9SHugh Dickins 		}
305869f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
30591da177e4SLinus Torvalds 	} else {
3060e8ecde25SAl Viro 		inode_nohighmem(inode);
30619e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
30621da177e4SLinus Torvalds 		if (error) {
30631da177e4SLinus Torvalds 			iput(inode);
30641da177e4SLinus Torvalds 			return error;
30651da177e4SLinus Torvalds 		}
306614fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
30671da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
306821fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3069ec9516fbSHugh Dickins 		SetPageUptodate(page);
30701da177e4SLinus Torvalds 		set_page_dirty(page);
30716746aff7SWu Fengguang 		unlock_page(page);
307209cbfeafSKirill A. Shutemov 		put_page(page);
30731da177e4SLinus Torvalds 	}
30741da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3075078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
30761da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30771da177e4SLinus Torvalds 	dget(dentry);
30781da177e4SLinus Torvalds 	return 0;
30791da177e4SLinus Torvalds }
30801da177e4SLinus Torvalds 
3081fceef393SAl Viro static void shmem_put_link(void *arg)
3082fceef393SAl Viro {
3083fceef393SAl Viro 	mark_page_accessed(arg);
3084fceef393SAl Viro 	put_page(arg);
3085fceef393SAl Viro }
3086fceef393SAl Viro 
30876b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3088fceef393SAl Viro 				  struct inode *inode,
3089fceef393SAl Viro 				  struct delayed_call *done)
30901da177e4SLinus Torvalds {
30911da177e4SLinus Torvalds 	struct page *page = NULL;
30926b255391SAl Viro 	int error;
30936a6c9904SAl Viro 	if (!dentry) {
30946a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
30956a6c9904SAl Viro 		if (!page)
30966b255391SAl Viro 			return ERR_PTR(-ECHILD);
3097d0b51bfbSLinus Torvalds 		if (!PageUptodate(page)) {
30986a6c9904SAl Viro 			put_page(page);
30996a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31006a6c9904SAl Viro 		}
31016a6c9904SAl Viro 	} else {
31029e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3103680baacbSAl Viro 		if (error)
3104680baacbSAl Viro 			return ERR_PTR(error);
3105d3602444SHugh Dickins 		unlock_page(page);
31061da177e4SLinus Torvalds 	}
3107fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
310821fc61c7SAl Viro 	return page_address(page);
31091da177e4SLinus Torvalds }
31101da177e4SLinus Torvalds 
3111b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3112b09e0fa4SEric Paris /*
3113b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3114b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3115b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3116b09e0fa4SEric Paris  * filesystem level, though.
3117b09e0fa4SEric Paris  */
3118b09e0fa4SEric Paris 
31196d9d88d0SJarkko Sakkinen /*
31206d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
31216d9d88d0SJarkko Sakkinen  */
31226d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
31236d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
31246d9d88d0SJarkko Sakkinen 			    void *fs_info)
31256d9d88d0SJarkko Sakkinen {
31266d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
31276d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
312838f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
31296d9d88d0SJarkko Sakkinen 	size_t len;
31306d9d88d0SJarkko Sakkinen 
31316d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
313238f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
31336d9d88d0SJarkko Sakkinen 		if (!new_xattr)
31346d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31356d9d88d0SJarkko Sakkinen 
31366d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
31376d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
31386d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
31396d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
31403bef735aSChengguang Xu 			kvfree(new_xattr);
31416d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31426d9d88d0SJarkko Sakkinen 		}
31436d9d88d0SJarkko Sakkinen 
31446d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
31456d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
31466d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
31476d9d88d0SJarkko Sakkinen 		       xattr->name, len);
31486d9d88d0SJarkko Sakkinen 
314938f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
31506d9d88d0SJarkko Sakkinen 	}
31516d9d88d0SJarkko Sakkinen 
31526d9d88d0SJarkko Sakkinen 	return 0;
31536d9d88d0SJarkko Sakkinen }
31546d9d88d0SJarkko Sakkinen 
3155aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3156b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3157b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3158aa7c5241SAndreas Gruenbacher {
3159b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3160aa7c5241SAndreas Gruenbacher 
3161aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3162aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3163aa7c5241SAndreas Gruenbacher }
3164aa7c5241SAndreas Gruenbacher 
3165aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3166e65ce2a5SChristian Brauner 				   struct user_namespace *mnt_userns,
316759301226SAl Viro 				   struct dentry *unused, struct inode *inode,
316859301226SAl Viro 				   const char *name, const void *value,
316959301226SAl Viro 				   size_t size, int flags)
3170aa7c5241SAndreas Gruenbacher {
317159301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3172aa7c5241SAndreas Gruenbacher 
3173aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3174a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3175aa7c5241SAndreas Gruenbacher }
3176aa7c5241SAndreas Gruenbacher 
3177aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3178aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3179aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3180aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3181aa7c5241SAndreas Gruenbacher };
3182aa7c5241SAndreas Gruenbacher 
3183aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3184aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3185aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3186aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3187aa7c5241SAndreas Gruenbacher };
3188aa7c5241SAndreas Gruenbacher 
3189b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3190b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3191feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3192feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3193b09e0fa4SEric Paris #endif
3194aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3195aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3196b09e0fa4SEric Paris 	NULL
3197b09e0fa4SEric Paris };
3198b09e0fa4SEric Paris 
3199b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3200b09e0fa4SEric Paris {
320175c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3202786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3203b09e0fa4SEric Paris }
3204b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3205b09e0fa4SEric Paris 
320669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
32076b255391SAl Viro 	.get_link	= simple_get_link,
3208b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3209b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3210b09e0fa4SEric Paris #endif
32111da177e4SLinus Torvalds };
32121da177e4SLinus Torvalds 
321392e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
32146b255391SAl Viro 	.get_link	= shmem_get_link,
3215b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3216b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
321739f0247dSAndreas Gruenbacher #endif
3218b09e0fa4SEric Paris };
321939f0247dSAndreas Gruenbacher 
322091828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
322191828a40SDavid M. Grimes {
322291828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
322391828a40SDavid M. Grimes }
322491828a40SDavid M. Grimes 
322591828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
322691828a40SDavid M. Grimes {
322791828a40SDavid M. Grimes 	__u32 *fh = vfh;
322891828a40SDavid M. Grimes 	__u64 inum = fh[2];
322991828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
323091828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
323191828a40SDavid M. Grimes }
323291828a40SDavid M. Grimes 
323312ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
323412ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
323512ba780dSAmir Goldstein {
323612ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
323712ba780dSAmir Goldstein 
323812ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
323912ba780dSAmir Goldstein }
324012ba780dSAmir Goldstein 
324112ba780dSAmir Goldstein 
3242480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3243480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
324491828a40SDavid M. Grimes {
324591828a40SDavid M. Grimes 	struct inode *inode;
3246480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
324735c2a7f4SHugh Dickins 	u64 inum;
324891828a40SDavid M. Grimes 
3249480b116cSChristoph Hellwig 	if (fh_len < 3)
3250480b116cSChristoph Hellwig 		return NULL;
3251480b116cSChristoph Hellwig 
325235c2a7f4SHugh Dickins 	inum = fid->raw[2];
325335c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
325435c2a7f4SHugh Dickins 
3255480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3256480b116cSChristoph Hellwig 			shmem_match, fid->raw);
325791828a40SDavid M. Grimes 	if (inode) {
325812ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
325991828a40SDavid M. Grimes 		iput(inode);
326091828a40SDavid M. Grimes 	}
326191828a40SDavid M. Grimes 
3262480b116cSChristoph Hellwig 	return dentry;
326391828a40SDavid M. Grimes }
326491828a40SDavid M. Grimes 
3265b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3266b0b0382bSAl Viro 				struct inode *parent)
326791828a40SDavid M. Grimes {
32685fe0c237SAneesh Kumar K.V 	if (*len < 3) {
32695fe0c237SAneesh Kumar K.V 		*len = 3;
327094e07a75SNamjae Jeon 		return FILEID_INVALID;
32715fe0c237SAneesh Kumar K.V 	}
327291828a40SDavid M. Grimes 
32731d3382cbSAl Viro 	if (inode_unhashed(inode)) {
327491828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
327591828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
327691828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
327791828a40SDavid M. Grimes 		 * to do it once
327891828a40SDavid M. Grimes 		 */
327991828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
328091828a40SDavid M. Grimes 		spin_lock(&lock);
32811d3382cbSAl Viro 		if (inode_unhashed(inode))
328291828a40SDavid M. Grimes 			__insert_inode_hash(inode,
328391828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
328491828a40SDavid M. Grimes 		spin_unlock(&lock);
328591828a40SDavid M. Grimes 	}
328691828a40SDavid M. Grimes 
328791828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
328891828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
328991828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
329091828a40SDavid M. Grimes 
329191828a40SDavid M. Grimes 	*len = 3;
329291828a40SDavid M. Grimes 	return 1;
329391828a40SDavid M. Grimes }
329491828a40SDavid M. Grimes 
329539655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
329691828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
329791828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3298480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
329991828a40SDavid M. Grimes };
330091828a40SDavid M. Grimes 
3301626c3920SAl Viro enum shmem_param {
3302626c3920SAl Viro 	Opt_gid,
3303626c3920SAl Viro 	Opt_huge,
3304626c3920SAl Viro 	Opt_mode,
3305626c3920SAl Viro 	Opt_mpol,
3306626c3920SAl Viro 	Opt_nr_blocks,
3307626c3920SAl Viro 	Opt_nr_inodes,
3308626c3920SAl Viro 	Opt_size,
3309626c3920SAl Viro 	Opt_uid,
3310ea3271f7SChris Down 	Opt_inode32,
3311ea3271f7SChris Down 	Opt_inode64,
3312626c3920SAl Viro };
33131da177e4SLinus Torvalds 
33145eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
33152710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
33162710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
33172710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
33182710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
33192710c957SAl Viro 	{}
33202710c957SAl Viro };
33212710c957SAl Viro 
3322d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3323626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
33242710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3325626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3326626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3327626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3328626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3329626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3330626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3331ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3332ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
3333626c3920SAl Viro 	{}
3334626c3920SAl Viro };
3335626c3920SAl Viro 
3336f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3337626c3920SAl Viro {
3338f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3339626c3920SAl Viro 	struct fs_parse_result result;
3340e04dc423SAl Viro 	unsigned long long size;
3341626c3920SAl Viro 	char *rest;
3342626c3920SAl Viro 	int opt;
3343626c3920SAl Viro 
3344d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3345f3235626SDavid Howells 	if (opt < 0)
3346626c3920SAl Viro 		return opt;
3347626c3920SAl Viro 
3348626c3920SAl Viro 	switch (opt) {
3349626c3920SAl Viro 	case Opt_size:
3350626c3920SAl Viro 		size = memparse(param->string, &rest);
3351e04dc423SAl Viro 		if (*rest == '%') {
3352e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3353e04dc423SAl Viro 			size *= totalram_pages();
3354e04dc423SAl Viro 			do_div(size, 100);
3355e04dc423SAl Viro 			rest++;
3356e04dc423SAl Viro 		}
3357e04dc423SAl Viro 		if (*rest)
3358626c3920SAl Viro 			goto bad_value;
3359e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3360e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3361626c3920SAl Viro 		break;
3362626c3920SAl Viro 	case Opt_nr_blocks:
3363626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
3364e04dc423SAl Viro 		if (*rest)
3365626c3920SAl Viro 			goto bad_value;
3366e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3367626c3920SAl Viro 		break;
3368626c3920SAl Viro 	case Opt_nr_inodes:
3369626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3370e04dc423SAl Viro 		if (*rest)
3371626c3920SAl Viro 			goto bad_value;
3372e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3373626c3920SAl Viro 		break;
3374626c3920SAl Viro 	case Opt_mode:
3375626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3376626c3920SAl Viro 		break;
3377626c3920SAl Viro 	case Opt_uid:
3378626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3379e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3380626c3920SAl Viro 			goto bad_value;
3381626c3920SAl Viro 		break;
3382626c3920SAl Viro 	case Opt_gid:
3383626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3384e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3385626c3920SAl Viro 			goto bad_value;
3386626c3920SAl Viro 		break;
3387626c3920SAl Viro 	case Opt_huge:
3388626c3920SAl Viro 		ctx->huge = result.uint_32;
3389626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3390396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3391626c3920SAl Viro 		      has_transparent_hugepage()))
3392626c3920SAl Viro 			goto unsupported_parameter;
3393e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3394626c3920SAl Viro 		break;
3395626c3920SAl Viro 	case Opt_mpol:
3396626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3397e04dc423SAl Viro 			mpol_put(ctx->mpol);
3398e04dc423SAl Viro 			ctx->mpol = NULL;
3399626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3400626c3920SAl Viro 				goto bad_value;
3401626c3920SAl Viro 			break;
3402626c3920SAl Viro 		}
3403626c3920SAl Viro 		goto unsupported_parameter;
3404ea3271f7SChris Down 	case Opt_inode32:
3405ea3271f7SChris Down 		ctx->full_inums = false;
3406ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3407ea3271f7SChris Down 		break;
3408ea3271f7SChris Down 	case Opt_inode64:
3409ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3410ea3271f7SChris Down 			return invalfc(fc,
3411ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel\n");
3412ea3271f7SChris Down 		}
3413ea3271f7SChris Down 		ctx->full_inums = true;
3414ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3415ea3271f7SChris Down 		break;
3416e04dc423SAl Viro 	}
3417e04dc423SAl Viro 	return 0;
3418e04dc423SAl Viro 
3419626c3920SAl Viro unsupported_parameter:
3420f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3421626c3920SAl Viro bad_value:
3422f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3423e04dc423SAl Viro }
3424e04dc423SAl Viro 
3425f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3426e04dc423SAl Viro {
3427f3235626SDavid Howells 	char *options = data;
3428f3235626SDavid Howells 
342933f37c64SAl Viro 	if (options) {
343033f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
343133f37c64SAl Viro 		if (err)
343233f37c64SAl Viro 			return err;
343333f37c64SAl Viro 	}
343433f37c64SAl Viro 
3435b00dc3adSHugh Dickins 	while (options != NULL) {
3436626c3920SAl Viro 		char *this_char = options;
3437b00dc3adSHugh Dickins 		for (;;) {
3438b00dc3adSHugh Dickins 			/*
3439b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3440b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3441b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3442b00dc3adSHugh Dickins 			 */
3443b00dc3adSHugh Dickins 			options = strchr(options, ',');
3444b00dc3adSHugh Dickins 			if (options == NULL)
3445b00dc3adSHugh Dickins 				break;
3446b00dc3adSHugh Dickins 			options++;
3447b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3448b00dc3adSHugh Dickins 				options[-1] = '\0';
3449b00dc3adSHugh Dickins 				break;
3450b00dc3adSHugh Dickins 			}
3451b00dc3adSHugh Dickins 		}
3452626c3920SAl Viro 		if (*this_char) {
3453626c3920SAl Viro 			char *value = strchr(this_char, '=');
3454f3235626SDavid Howells 			size_t len = 0;
3455626c3920SAl Viro 			int err;
3456626c3920SAl Viro 
3457626c3920SAl Viro 			if (value) {
3458626c3920SAl Viro 				*value++ = '\0';
3459f3235626SDavid Howells 				len = strlen(value);
34601da177e4SLinus Torvalds 			}
3461f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3462f3235626SDavid Howells 			if (err < 0)
3463f3235626SDavid Howells 				return err;
34641da177e4SLinus Torvalds 		}
3465626c3920SAl Viro 	}
34661da177e4SLinus Torvalds 	return 0;
34671da177e4SLinus Torvalds }
34681da177e4SLinus Torvalds 
3469f3235626SDavid Howells /*
3470f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3471f3235626SDavid Howells  *
3472f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3473f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3474f3235626SDavid Howells  * that case we have no record of how much is already in use.
3475f3235626SDavid Howells  */
3476f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
34771da177e4SLinus Torvalds {
3478f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3479f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
34800edd73b3SHugh Dickins 	unsigned long inodes;
3481bf11b9a8SSebastian Andrzej Siewior 	struct mempolicy *mpol = NULL;
3482f3235626SDavid Howells 	const char *err;
34830edd73b3SHugh Dickins 
3484bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock(&sbinfo->stat_lock);
34850edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3486f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3487f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3488f3235626SDavid Howells 			err = "Cannot retroactively limit size";
34890edd73b3SHugh Dickins 			goto out;
34900b5071ddSAl Viro 		}
3491f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3492f3235626SDavid Howells 					   ctx->blocks) > 0) {
3493f3235626SDavid Howells 			err = "Too small a size for current use";
34940b5071ddSAl Viro 			goto out;
3495f3235626SDavid Howells 		}
3496f3235626SDavid Howells 	}
3497f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3498f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3499f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
35000b5071ddSAl Viro 			goto out;
35010b5071ddSAl Viro 		}
3502f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3503f3235626SDavid Howells 			err = "Too few inodes for current use";
3504f3235626SDavid Howells 			goto out;
3505f3235626SDavid Howells 		}
3506f3235626SDavid Howells 	}
35070edd73b3SHugh Dickins 
3508ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3509ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3510ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3511ea3271f7SChris Down 		goto out;
3512ea3271f7SChris Down 	}
3513ea3271f7SChris Down 
3514f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3515f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3516ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3517ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3518f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3519f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3520f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3521f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3522f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
35230b5071ddSAl Viro 	}
352471fe804bSLee Schermerhorn 
35255f00110fSGreg Thelen 	/*
35265f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
35275f00110fSGreg Thelen 	 */
3528f3235626SDavid Howells 	if (ctx->mpol) {
3529bf11b9a8SSebastian Andrzej Siewior 		mpol = sbinfo->mpol;
3530f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3531f3235626SDavid Howells 		ctx->mpol = NULL;
35325f00110fSGreg Thelen 	}
3533bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3534bf11b9a8SSebastian Andrzej Siewior 	mpol_put(mpol);
3535f3235626SDavid Howells 	return 0;
35360edd73b3SHugh Dickins out:
3537bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3538f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
35391da177e4SLinus Torvalds }
3540680d794bSakpm@linux-foundation.org 
354134c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3542680d794bSakpm@linux-foundation.org {
354334c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3544680d794bSakpm@linux-foundation.org 
3545680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3546680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
354709cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3548680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3549680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
35500825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
355109208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
35528751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
35538751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
35548751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
35558751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
35568751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
35578751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3558ea3271f7SChris Down 
3559ea3271f7SChris Down 	/*
3560ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3561ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3562ea3271f7SChris Down 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
3563ea3271f7SChris Down 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3564ea3271f7SChris Down 	 *
3565ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3566ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3567ea3271f7SChris Down 	 * consideration.
3568ea3271f7SChris Down 	 *
3569ea3271f7SChris Down 	 * As such:
3570ea3271f7SChris Down 	 *
3571ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3572ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3573ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3574ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3575ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3576ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3577ea3271f7SChris Down 	 *
3578ea3271f7SChris Down 	 */
3579ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3580ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3581396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
35825a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
35835a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
35845a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
35855a6e75f8SKirill A. Shutemov #endif
358671fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3587680d794bSakpm@linux-foundation.org 	return 0;
3588680d794bSakpm@linux-foundation.org }
35899183df25SDavid Herrmann 
3590680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
35911da177e4SLinus Torvalds 
35921da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
35931da177e4SLinus Torvalds {
3594602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3595602586a8SHugh Dickins 
3596e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3597602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
359849cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3599602586a8SHugh Dickins 	kfree(sbinfo);
36001da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
36011da177e4SLinus Torvalds }
36021da177e4SLinus Torvalds 
3603f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
36041da177e4SLinus Torvalds {
3605f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
36061da177e4SLinus Torvalds 	struct inode *inode;
36070edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3608680d794bSakpm@linux-foundation.org 
3609680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3610425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3611680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3612680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3613680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3614680d794bSakpm@linux-foundation.org 
3615680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
36161da177e4SLinus Torvalds 
36170edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
36181da177e4SLinus Torvalds 	/*
36191da177e4SLinus Torvalds 	 * Per default we only allow half of the physical ram per
36201da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
36211da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
36221da177e4SLinus Torvalds 	 */
36231751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3624f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3625f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3626f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3627f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3628ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3629ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3630ca4e0519SAl Viro 	} else {
36311751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
36321da177e4SLinus Torvalds 	}
363391828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
36341751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
36350edd73b3SHugh Dickins #else
36361751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
36370edd73b3SHugh Dickins #endif
3638f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3639f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3640e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3641e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3642e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3643e809d5f0SChris Down 			goto failed;
3644e809d5f0SChris Down 	}
3645f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3646f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3647ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3648f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3649f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3650f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3651f3235626SDavid Howells 	ctx->mpol = NULL;
36521da177e4SLinus Torvalds 
3653bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock_init(&sbinfo->stat_lock);
3654908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3655602586a8SHugh Dickins 		goto failed;
3656779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3657779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
36581da177e4SLinus Torvalds 
3659285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
366009cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
366109cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
36621da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
36631da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3664cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3665b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
366639f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3667b09e0fa4SEric Paris #endif
3668b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
36691751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
367039f0247dSAndreas Gruenbacher #endif
36712b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
36720edd73b3SHugh Dickins 
3673454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
36741da177e4SLinus Torvalds 	if (!inode)
36751da177e4SLinus Torvalds 		goto failed;
3676680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3677680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3678318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3679318ceed0SAl Viro 	if (!sb->s_root)
368048fde701SAl Viro 		goto failed;
36811da177e4SLinus Torvalds 	return 0;
36821da177e4SLinus Torvalds 
36831da177e4SLinus Torvalds failed:
36841da177e4SLinus Torvalds 	shmem_put_super(sb);
3685f2b346e4SMiaohe Lin 	return -ENOMEM;
36861da177e4SLinus Torvalds }
36871da177e4SLinus Torvalds 
3688f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3689f3235626SDavid Howells {
3690f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3691f3235626SDavid Howells }
3692f3235626SDavid Howells 
3693f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3694f3235626SDavid Howells {
3695f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3696f3235626SDavid Howells 
3697f3235626SDavid Howells 	if (ctx) {
3698f3235626SDavid Howells 		mpol_put(ctx->mpol);
3699f3235626SDavid Howells 		kfree(ctx);
3700f3235626SDavid Howells 	}
3701f3235626SDavid Howells }
3702f3235626SDavid Howells 
3703f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3704f3235626SDavid Howells 	.free			= shmem_free_fc,
3705f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3706f3235626SDavid Howells #ifdef CONFIG_TMPFS
3707f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3708f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3709f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3710f3235626SDavid Howells #endif
3711f3235626SDavid Howells };
3712f3235626SDavid Howells 
3713fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
37141da177e4SLinus Torvalds 
37151da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
37161da177e4SLinus Torvalds {
371741ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
371841ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
371941ffe5d5SHugh Dickins 	if (!info)
37201da177e4SLinus Torvalds 		return NULL;
372141ffe5d5SHugh Dickins 	return &info->vfs_inode;
37221da177e4SLinus Torvalds }
37231da177e4SLinus Torvalds 
372474b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3725fa0d7e3dSNick Piggin {
372684e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
37273ed47db3SAl Viro 		kfree(inode->i_link);
3728fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3729fa0d7e3dSNick Piggin }
3730fa0d7e3dSNick Piggin 
37311da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
37321da177e4SLinus Torvalds {
373309208d15SAl Viro 	if (S_ISREG(inode->i_mode))
37341da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
37351da177e4SLinus Torvalds }
37361da177e4SLinus Torvalds 
373741ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
37381da177e4SLinus Torvalds {
373941ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
374041ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
37411da177e4SLinus Torvalds }
37421da177e4SLinus Torvalds 
37439a8ec03eSweiping zhang static void shmem_init_inodecache(void)
37441da177e4SLinus Torvalds {
37451da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
37461da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
37475d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
37481da177e4SLinus Torvalds }
37491da177e4SLinus Torvalds 
375041ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
37511da177e4SLinus Torvalds {
37521a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
37531da177e4SLinus Torvalds }
37541da177e4SLinus Torvalds 
375530e6a51dSHui Su const struct address_space_operations shmem_aops = {
37561da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
375776719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
37581da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3759800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3760800d15a5SNick Piggin 	.write_end	= shmem_write_end,
37611da177e4SLinus Torvalds #endif
37621c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3763304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
37641c93923cSAndrew Morton #endif
3765d0b51bfbSLinus Torvalds 	.error_remove_page = generic_error_remove_page,
37661da177e4SLinus Torvalds };
376730e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
37681da177e4SLinus Torvalds 
376915ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
37701da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3771c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
37721da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3773220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
37742ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
37758174202bSAl Viro 	.write_iter	= generic_file_write_iter,
37761b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
377782c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3778f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
377983e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
37801da177e4SLinus Torvalds #endif
37811da177e4SLinus Torvalds };
37821da177e4SLinus Torvalds 
378392e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
378444a30220SYu Zhao 	.getattr	= shmem_getattr,
378594c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3786b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3787b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3788feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3789b09e0fa4SEric Paris #endif
37901da177e4SLinus Torvalds };
37911da177e4SLinus Torvalds 
379292e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
37931da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
37941da177e4SLinus Torvalds 	.create		= shmem_create,
37951da177e4SLinus Torvalds 	.lookup		= simple_lookup,
37961da177e4SLinus Torvalds 	.link		= shmem_link,
37971da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
37981da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
37991da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
38001da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
38011da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
38022773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
380360545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
38041da177e4SLinus Torvalds #endif
3805b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3806b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3807b09e0fa4SEric Paris #endif
380839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
380994c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3810feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
381139f0247dSAndreas Gruenbacher #endif
381239f0247dSAndreas Gruenbacher };
381339f0247dSAndreas Gruenbacher 
381492e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3815b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3816b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3817b09e0fa4SEric Paris #endif
381839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
381994c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3820feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
382139f0247dSAndreas Gruenbacher #endif
38221da177e4SLinus Torvalds };
38231da177e4SLinus Torvalds 
3824759b9775SHugh Dickins static const struct super_operations shmem_ops = {
38251da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
382674b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
38271da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
38281da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38291da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3830680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
38311da177e4SLinus Torvalds #endif
38321f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
38331da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
38341da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3835396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3836779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3837779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3838779750d2SKirill A. Shutemov #endif
38391da177e4SLinus Torvalds };
38401da177e4SLinus Torvalds 
3841f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
384254cb8821SNick Piggin 	.fault		= shmem_fault,
3843d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
38441da177e4SLinus Torvalds #ifdef CONFIG_NUMA
38451da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
38461da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
38471da177e4SLinus Torvalds #endif
38481da177e4SLinus Torvalds };
38491da177e4SLinus Torvalds 
3850f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
38511da177e4SLinus Torvalds {
3852f3235626SDavid Howells 	struct shmem_options *ctx;
3853f3235626SDavid Howells 
3854f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3855f3235626SDavid Howells 	if (!ctx)
3856f3235626SDavid Howells 		return -ENOMEM;
3857f3235626SDavid Howells 
3858f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3859f3235626SDavid Howells 	ctx->uid = current_fsuid();
3860f3235626SDavid Howells 	ctx->gid = current_fsgid();
3861f3235626SDavid Howells 
3862f3235626SDavid Howells 	fc->fs_private = ctx;
3863f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3864f3235626SDavid Howells 	return 0;
38651da177e4SLinus Torvalds }
38661da177e4SLinus Torvalds 
386741ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
38681da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
38691da177e4SLinus Torvalds 	.name		= "tmpfs",
3870f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3871f3235626SDavid Howells #ifdef CONFIG_TMPFS
3872d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3873f3235626SDavid Howells #endif
38741da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
3875ff36da69SMatthew Wilcox (Oracle) 	.fs_flags	= FS_USERNS_MOUNT,
38761da177e4SLinus Torvalds };
38771da177e4SLinus Torvalds 
387841ffe5d5SHugh Dickins int __init shmem_init(void)
38791da177e4SLinus Torvalds {
38801da177e4SLinus Torvalds 	int error;
38811da177e4SLinus Torvalds 
38829a8ec03eSweiping zhang 	shmem_init_inodecache();
38831da177e4SLinus Torvalds 
388441ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
38851da177e4SLinus Torvalds 	if (error) {
38861170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
38871da177e4SLinus Torvalds 		goto out2;
38881da177e4SLinus Torvalds 	}
388995dc112aSGreg Kroah-Hartman 
3890ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
38911da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
38921da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
38931170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
38941da177e4SLinus Torvalds 		goto out1;
38951da177e4SLinus Torvalds 	}
38965a6e75f8SKirill A. Shutemov 
3897396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3898435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
38995a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39005a6e75f8SKirill A. Shutemov 	else
39015e6e5a12SHugh Dickins 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
39025a6e75f8SKirill A. Shutemov #endif
39031da177e4SLinus Torvalds 	return 0;
39041da177e4SLinus Torvalds 
39051da177e4SLinus Torvalds out1:
390641ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
39071da177e4SLinus Torvalds out2:
390841ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
39091da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
39101da177e4SLinus Torvalds 	return error;
39111da177e4SLinus Torvalds }
3912853ac43aSMatt Mackall 
3913396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
39145a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
39155a6e75f8SKirill A. Shutemov 				  struct kobj_attribute *attr, char *buf)
39165a6e75f8SKirill A. Shutemov {
391726083eb6SColin Ian King 	static const int values[] = {
39185a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
39195a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
39205a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
39215a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
39225a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
39235a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
39245a6e75f8SKirill A. Shutemov 	};
392579d4d38aSJoe Perches 	int len = 0;
392679d4d38aSJoe Perches 	int i;
39275a6e75f8SKirill A. Shutemov 
392879d4d38aSJoe Perches 	for (i = 0; i < ARRAY_SIZE(values); i++) {
392979d4d38aSJoe Perches 		len += sysfs_emit_at(buf, len,
393079d4d38aSJoe Perches 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
393179d4d38aSJoe Perches 				     i ? " " : "",
39325a6e75f8SKirill A. Shutemov 				     shmem_format_huge(values[i]));
39335a6e75f8SKirill A. Shutemov 	}
393479d4d38aSJoe Perches 
393579d4d38aSJoe Perches 	len += sysfs_emit_at(buf, len, "\n");
393679d4d38aSJoe Perches 
393779d4d38aSJoe Perches 	return len;
39385a6e75f8SKirill A. Shutemov }
39395a6e75f8SKirill A. Shutemov 
39405a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
39415a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
39425a6e75f8SKirill A. Shutemov {
39435a6e75f8SKirill A. Shutemov 	char tmp[16];
39445a6e75f8SKirill A. Shutemov 	int huge;
39455a6e75f8SKirill A. Shutemov 
39465a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
39475a6e75f8SKirill A. Shutemov 		return -EINVAL;
39485a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
39495a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
39505a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
39515a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
39525a6e75f8SKirill A. Shutemov 
39535a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
39545a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
39555a6e75f8SKirill A. Shutemov 		return -EINVAL;
39565a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
39575a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
39585a6e75f8SKirill A. Shutemov 		return -EINVAL;
39595a6e75f8SKirill A. Shutemov 
39605a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
3961435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
39625a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39635a6e75f8SKirill A. Shutemov 	return count;
39645a6e75f8SKirill A. Shutemov }
39655a6e75f8SKirill A. Shutemov 
39665a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
39675a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3968396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
3969f3f0e1d2SKirill A. Shutemov 
3970853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
3971853ac43aSMatt Mackall 
3972853ac43aSMatt Mackall /*
3973853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3974853ac43aSMatt Mackall  *
3975853ac43aSMatt Mackall  * This is intended for small system where the benefits of the full
3976853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
3977853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
3978853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
3979853ac43aSMatt Mackall  */
3980853ac43aSMatt Mackall 
398141ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
3982853ac43aSMatt Mackall 	.name		= "tmpfs",
3983f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
3984d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
3985853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
39862b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
3987853ac43aSMatt Mackall };
3988853ac43aSMatt Mackall 
398941ffe5d5SHugh Dickins int __init shmem_init(void)
3990853ac43aSMatt Mackall {
399141ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3992853ac43aSMatt Mackall 
399341ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
3994853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
3995853ac43aSMatt Mackall 
3996853ac43aSMatt Mackall 	return 0;
3997853ac43aSMatt Mackall }
3998853ac43aSMatt Mackall 
3999b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4000b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
4001853ac43aSMatt Mackall {
4002853ac43aSMatt Mackall 	return 0;
4003853ac43aSMatt Mackall }
4004853ac43aSMatt Mackall 
4005d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
40063f96b79aSHugh Dickins {
40073f96b79aSHugh Dickins 	return 0;
40083f96b79aSHugh Dickins }
40093f96b79aSHugh Dickins 
401024513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
401124513264SHugh Dickins {
401224513264SHugh Dickins }
401324513264SHugh Dickins 
4014c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4015c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4016c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4017c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4018c01d5b30SHugh Dickins {
4019c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4020c01d5b30SHugh Dickins }
4021c01d5b30SHugh Dickins #endif
4022c01d5b30SHugh Dickins 
402341ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
402494c1e62dSHugh Dickins {
402541ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
402694c1e62dSHugh Dickins }
402794c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
402894c1e62dSHugh Dickins 
4029853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
40300b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4031454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
40320b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
40330b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4034853ac43aSMatt Mackall 
4035853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4036853ac43aSMatt Mackall 
4037853ac43aSMatt Mackall /* common code */
40381da177e4SLinus Torvalds 
4039703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4040c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
40411da177e4SLinus Torvalds {
40421da177e4SLinus Torvalds 	struct inode *inode;
404393dec2daSAl Viro 	struct file *res;
40441da177e4SLinus Torvalds 
4045703321b6SMatthew Auld 	if (IS_ERR(mnt))
4046703321b6SMatthew Auld 		return ERR_CAST(mnt);
40471da177e4SLinus Torvalds 
4048285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
40491da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
40501da177e4SLinus Torvalds 
40511da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
40521da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
40531da177e4SLinus Torvalds 
405493dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
405593dec2daSAl Viro 				flags);
4056dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4057dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4058dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4059dac2d1f6SAl Viro 	}
4060c7277090SEric Paris 	inode->i_flags |= i_flags;
40611da177e4SLinus Torvalds 	inode->i_size = size;
40626d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
406326567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
406493dec2daSAl Viro 	if (!IS_ERR(res))
406593dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
40664b42af81SAl Viro 				&shmem_file_operations);
40676b4d0b27SAl Viro 	if (IS_ERR(res))
406893dec2daSAl Viro 		iput(inode);
40696b4d0b27SAl Viro 	return res;
40701da177e4SLinus Torvalds }
4071c7277090SEric Paris 
4072c7277090SEric Paris /**
4073c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4074c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4075c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4076e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4077e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4078c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps
4079c7277090SEric Paris  * @size: size to be set for the file
4080c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4081c7277090SEric Paris  */
4082c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4083c7277090SEric Paris {
4084703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4085c7277090SEric Paris }
4086c7277090SEric Paris 
4087c7277090SEric Paris /**
4088c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4089c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps
4090c7277090SEric Paris  * @size: size to be set for the file
4091c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4092c7277090SEric Paris  */
4093c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4094c7277090SEric Paris {
4095703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4096c7277090SEric Paris }
4097395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
40981da177e4SLinus Torvalds 
409946711810SRandy Dunlap /**
4100703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4101703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4102703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps
4103703321b6SMatthew Auld  * @size: size to be set for the file
4104703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4105703321b6SMatthew Auld  */
4106703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4107703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4108703321b6SMatthew Auld {
4109703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4110703321b6SMatthew Auld }
4111703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4112703321b6SMatthew Auld 
4113703321b6SMatthew Auld /**
41141da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
411545e55300SPeter Collingbourne  * @vma: the vma to be mmapped is prepared by do_mmap
41161da177e4SLinus Torvalds  */
41171da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41181da177e4SLinus Torvalds {
41191da177e4SLinus Torvalds 	struct file *file;
41201da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
41211da177e4SLinus Torvalds 
412266fc1303SHugh Dickins 	/*
4123c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
412466fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
412566fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
412666fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
412766fc1303SHugh Dickins 	 */
4128703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
41291da177e4SLinus Torvalds 	if (IS_ERR(file))
41301da177e4SLinus Torvalds 		return PTR_ERR(file);
41311da177e4SLinus Torvalds 
41321da177e4SLinus Torvalds 	if (vma->vm_file)
41331da177e4SLinus Torvalds 		fput(vma->vm_file);
41341da177e4SLinus Torvalds 	vma->vm_file = file;
41351da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4136f3f0e1d2SKirill A. Shutemov 
4137396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4138f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4139f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4140f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4141f3f0e1d2SKirill A. Shutemov 	}
4142f3f0e1d2SKirill A. Shutemov 
41431da177e4SLinus Torvalds 	return 0;
41441da177e4SLinus Torvalds }
4145d9d90e5eSHugh Dickins 
4146d9d90e5eSHugh Dickins /**
4147d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4148d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4149d9d90e5eSHugh Dickins  * @index:	the page index
4150d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4151d9d90e5eSHugh Dickins  *
4152d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4153d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4154d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4155d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4156d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4157d9d90e5eSHugh Dickins  *
415868da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
415968da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4160d9d90e5eSHugh Dickins  */
4161d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4162d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4163d9d90e5eSHugh Dickins {
416468da9f05SHugh Dickins #ifdef CONFIG_SHMEM
416568da9f05SHugh Dickins 	struct inode *inode = mapping->host;
41669276aad6SHugh Dickins 	struct page *page;
416768da9f05SHugh Dickins 	int error;
416868da9f05SHugh Dickins 
416930e6a51dSHui Su 	BUG_ON(!shmem_mapping(mapping));
41709e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4171cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
417268da9f05SHugh Dickins 	if (error)
417368da9f05SHugh Dickins 		page = ERR_PTR(error);
417468da9f05SHugh Dickins 	else
417568da9f05SHugh Dickins 		unlock_page(page);
417668da9f05SHugh Dickins 	return page;
417768da9f05SHugh Dickins #else
417868da9f05SHugh Dickins 	/*
417968da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
418068da9f05SHugh Dickins 	 */
4181d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
418268da9f05SHugh Dickins #endif
4183d9d90e5eSHugh Dickins }
4184d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
4185