/* xref: /openbmc/linux/mm/shmem.c (revision bf11b9a8e9a93c1fc0ebfc2929622d5cf7d43888) */
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

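/*
 * Editor's illustrative sketch (not part of the original file): roughly
 * how a parsed mount string maps onto shmem_options; the concrete values
 * here are made up for illustration.
 *
 *	tmpfs mounted with "size=1G,nr_inodes=10k,huge=always,inode64"
 *	ends up approximately as:
 *
 *	struct shmem_options ctx = {
 *		.blocks     = SZ_1G >> PAGE_SHIFT,
 *		.inodes     = 10 * 1024,
 *		.huge       = SHMEM_HUGE_ALWAYS,
 *		.full_inums = true,
 *		.seen       = SHMEM_SEEN_BLOCKS | SHMEM_SEEN_INODES |
 *			      SHMEM_SEEN_HUGE | SHMEM_SEEN_INUMS,
 *	};
 */
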
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

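/*
 * Editor's worked example (not part of the original file): with 4K pages,
 * VM_ACCT(size) is just the page count, so a 10000-byte object accounts
 * PAGE_ALIGN(10000) >> PAGE_SHIFT = 12288 >> 12 = 3 pages up front via
 * shmem_acct_size(), whereas a VM_NORESERVE object accounts nothing up
 * front and is instead charged one page at a time via shmem_acct_block().
 */
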
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

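/*
 * Editor's illustrative sketch (not part of the original file): the
 * expected call pairing when an allocation fails after block accounting
 * succeeded, loosely modelled on the allocation path later in this file:
 *
 *	if (!shmem_inode_acct_block(inode, nr))
 *		return ERR_PTR(-ENOSPC);	// quota or sb limit hit
 *	page = shmem_alloc_page(gfp, info, index);
 *	if (!page) {
 *		shmem_inode_unacct_blocks(inode, nr);	// undo the charge
 *		return ERR_PTR(-ENOMEM);
 *	}
 */
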
static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

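/*
 * Editor's worked example (not part of the original file): with
 * SHMEM_INO_BATCH == 1024, the first reservation on a kernel-internal
 * (SB_KERNMOUNT) mount finds *next_ino == 0, takes stat_lock once,
 * claims inos [0, 1024) for this CPU, skips the invalid ino 0 and hands
 * out 1; subsequent reservations on the same CPU hand out 2, 3, ... 1023
 * without touching stat_lock, which is taken again only at the next
 * batch boundary.
 */
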
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

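/*
 * Editor's worked example (not part of the original file): if an inode
 * has alloced == 100, swapped == 30 and nrpages == 60, then reclaim has
 * silently dropped freed = 100 - 30 - 60 = 10 clean hole pages; alloced
 * drops to 90, i_blocks drops by 10 * BLOCKS_PER_PAGE, and those 10
 * pages are returned to the block accounting above.
 */
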
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

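/*
 * Editor's illustrative note (not part of the original file): swap slots
 * live in the page cache xarray as "value" entries rather than page
 * pointers, so a lookup can distinguish the two cheaply:
 *
 *	void *entry = xa_load(&mapping->i_pages, index);
 *
 *	if (xa_is_value(entry)) {
 *		swp_entry_t swap = radix_to_swp_entry(entry);
 *		...				// entry encodes a swap slot
 *	} else {
 *		struct page *page = entry;	// page pointer, or NULL hole
 *	}
 */
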
443d1899228SHugh Dickins /*
4445a6e75f8SKirill A. Shutemov  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
4455a6e75f8SKirill A. Shutemov  *
4465a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_NEVER:
4475a6e75f8SKirill A. Shutemov  *	disables huge pages for the mount;
4485a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ALWAYS:
4495a6e75f8SKirill A. Shutemov  *	enables huge pages for the mount;
4505a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_WITHIN_SIZE:
4515a6e75f8SKirill A. Shutemov  *	only allocate huge pages if the page will be fully within i_size,
4525a6e75f8SKirill A. Shutemov  *	also respect fadvise()/madvise() hints;
4535a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ADVISE:
4545a6e75f8SKirill A. Shutemov  *	only allocate huge pages if requested with fadvise()/madvise();
4555a6e75f8SKirill A. Shutemov  */
4565a6e75f8SKirill A. Shutemov 
4575a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_NEVER	0
4585a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ALWAYS	1
4595a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_WITHIN_SIZE	2
4605a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ADVISE	3
4615a6e75f8SKirill A. Shutemov 
4625a6e75f8SKirill A. Shutemov /*
4635a6e75f8SKirill A. Shutemov  * Special values.
4645a6e75f8SKirill A. Shutemov  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
4655a6e75f8SKirill A. Shutemov  *
4665a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_DENY:
4675a6e75f8SKirill A. Shutemov  *	disables huge on shm_mnt and all mounts, for emergency use;
4685a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_FORCE:
4695a6e75f8SKirill A. Shutemov  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
4705a6e75f8SKirill A. Shutemov  *
4715a6e75f8SKirill A. Shutemov  */
4725a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_DENY		(-1)
4735a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_FORCE	(-2)
4745a6e75f8SKirill A. Shutemov 
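/*
 * Editor's usage note (not part of the original file): for example,
 *
 *	# mount -t tmpfs -o huge=within_size tmpfs /mnt
 *	# echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * set a per-mount policy and the global override respectively; "deny" and
 * "force" are accepted only through the sysfs file, per the comment above.
 */
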
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}

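/*
 * Editor's illustrative sketch (not part of the original file): the
 * do/while above is the canonical XArray "store under lock, allocate
 * memory outside it" pattern; stripped of the shmem specifics it is:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	do {
 *		xas_lock_irq(&xas);
 *		xas_store(&xas, page);		// may record -ENOMEM in xas
 *		xas_unlock_irq(&xas);
 *	} while (xas_nomem(&xas, gfp));		// allocates a node, retries
 */
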
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

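/*
 * Editor's worked example (not part of the original file): for a vma
 * mapping file offset 1MB (vm_pgoff == 256 with 4K pages) over a 2MB
 * span, the partial path scans page-cache indices [256, 768) and, if it
 * finds, say, 5 value entries, reports 5 << PAGE_SHIFT == 20480 bytes
 * swapped out.
 */
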
/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, &index))
			break;
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}
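
/*
 * Editor's worked example (not part of the original file): with 4K pages
 * and HPAGE_PMD_NR == 512, punching indices [512, 1024) over a huge page
 * whose head sits at index 512 satisfies 512 >= 512 && 512 + 512 <= 1024,
 * so the whole compound page is deleted without a split; punching
 * [512, 1000) instead falls through to the split_huge_page() attempt.
 */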

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			if (!unfalloc || !PageUptodate(page))
				truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
					/* Wipe the page and don't get stuck */
					clear_highpage(page);
					flush_dcache_page(page);
					set_page_dirty(page);
					if (index <
					    round_up(start, HPAGE_PMD_NR))
						start = index + 1;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

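/*
 * Editor's usage note (not part of the original file): in-kernel users of
 * the exported helper punch a hole by passing an inclusive byte range,
 * e.g. dropping a single page at byte offset off:
 *
 *	shmem_truncate_range(inode, off, off + PAGE_SIZE - 1);
 *
 * and a whole-file truncate is shmem_truncate_range(inode, 0, (loff_t)-1),
 * with lend == -1 meaning "to the end of the file" as handled above.
 */
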
1036549c7297SChristian Brauner static int shmem_getattr(struct user_namespace *mnt_userns,
1037549c7297SChristian Brauner 			 const struct path *path, struct kstat *stat,
1038a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
103944a30220SYu Zhao {
1040a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
104144a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
104289fdcd26SYang Shi 	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
104344a30220SYu Zhao 
1044d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10454595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
104644a30220SYu Zhao 		shmem_recalc_inode(inode);
10474595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1048d0424c42SHugh Dickins 	}
10490d56a451SChristian Brauner 	generic_fillattr(&init_user_ns, inode, stat);
105089fdcd26SYang Shi 
105189fdcd26SYang Shi 	if (is_huge_enabled(sb_info))
105289fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
105389fdcd26SYang Shi 
105444a30220SYu Zhao 	return 0;
105544a30220SYu Zhao }
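
/*
 * Illustration (a minimal userspace sketch, not from this file): the
 * st_blksize chosen above is visible through stat(2), so a program can
 * discover the preferred I/O granularity of a tmpfs file -- PAGE_SIZE
 * normally, HPAGE_PMD_SIZE when huge pages are enabled.  Assumes a
 * tmpfs mount at /dev/shm; error handling omitted.
 *
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct stat st;
 *		if (stat("/dev/shm", &st) == 0)
 *			printf("blksize: %ld\n", (long)st.st_blksize);
 *		return 0;
 *	}
 */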
105644a30220SYu Zhao 
1057549c7297SChristian Brauner static int shmem_setattr(struct user_namespace *mnt_userns,
1058549c7297SChristian Brauner 			 struct dentry *dentry, struct iattr *attr)
10591da177e4SLinus Torvalds {
106075c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
106140e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1062779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10631da177e4SLinus Torvalds 	int error;
10641da177e4SLinus Torvalds 
10652f221d6fSChristian Brauner 	error = setattr_prepare(&init_user_ns, dentry, attr);
1066db78b877SChristoph Hellwig 	if (error)
1067db78b877SChristoph Hellwig 		return error;
1068db78b877SChristoph Hellwig 
106994c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
107094c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
107194c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10723889e6e7Snpiggin@suse.de 
107340e041a2SDavid Herrmann 		/* protected by i_mutex */
107440e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
107540e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
107640e041a2SDavid Herrmann 			return -EPERM;
107740e041a2SDavid Herrmann 
107894c1e62dSHugh Dickins 		if (newsize != oldsize) {
107977142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
108077142517SKonstantin Khlebnikov 					oldsize, newsize);
108177142517SKonstantin Khlebnikov 			if (error)
108277142517SKonstantin Khlebnikov 				return error;
108394c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1084078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
108594c1e62dSHugh Dickins 		}
1086afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
108794c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1088d0424c42SHugh Dickins 			if (oldsize > holebegin)
1089d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1090d0424c42SHugh Dickins 							holebegin, 0, 1);
1091d0424c42SHugh Dickins 			if (info->alloced)
1092d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1093d0424c42SHugh Dickins 							newsize, (loff_t)-1);
109494c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1095d0424c42SHugh Dickins 			if (oldsize > holebegin)
1096d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1097d0424c42SHugh Dickins 							holebegin, 0, 1);
1098779750d2SKirill A. Shutemov 
1099779750d2SKirill A. Shutemov 			/*
1100779750d2SKirill A. Shutemov 			 * Part of the huge page can be beyond i_size: subject
1101779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1102779750d2SKirill A. Shutemov 			 */
1103396bcc52SMatthew Wilcox (Oracle) 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1104779750d2SKirill A. Shutemov 				spin_lock(&sbinfo->shrinklist_lock);
1105d041353dSCong Wang 				/*
1106d041353dSCong Wang 				 * Use the _careful variant to defend against unlocked
1107d041353dSCong Wang 				 * access to ->shrink_list in shmem_unused_huge_shrink()
1108d041353dSCong Wang 				 */
1109d041353dSCong Wang 				if (list_empty_careful(&info->shrinklist)) {
1110779750d2SKirill A. Shutemov 					list_add_tail(&info->shrinklist,
1111779750d2SKirill A. Shutemov 							&sbinfo->shrinklist);
1112779750d2SKirill A. Shutemov 					sbinfo->shrinklist_len++;
1113779750d2SKirill A. Shutemov 				}
1114779750d2SKirill A. Shutemov 				spin_unlock(&sbinfo->shrinklist_lock);
1115779750d2SKirill A. Shutemov 			}
111694c1e62dSHugh Dickins 		}
11171da177e4SLinus Torvalds 	}
11181da177e4SLinus Torvalds 
11192f221d6fSChristian Brauner 	setattr_copy(&init_user_ns, inode, attr);
1120db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1121e65ce2a5SChristian Brauner 		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
11221da177e4SLinus Torvalds 	return error;
11231da177e4SLinus Torvalds }
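
/*
 * Illustration (a minimal userspace sketch, not from this file): the
 * F_SEAL_SHRINK/F_SEAL_GROW checks above are what userspace observes
 * when it seals a memfd -- once F_SEAL_SHRINK is set, any attempt to
 * shrink the file fails with EPERM.  Error handling omitted.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <assert.h>
 *	#include <errno.h>
 *
 *	int main(void)
 *	{
 *		int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *		ftruncate(fd, 4096);
 *		fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *		assert(ftruncate(fd, 0) == -1 && errno == EPERM);
 *		return 0;
 *	}
 */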
11241da177e4SLinus Torvalds 
11251f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11261da177e4SLinus Torvalds {
11271da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1128779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11291da177e4SLinus Torvalds 
113030e6a51dSHui Su 	if (shmem_mapping(inode->i_mapping)) {
11311da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11321da177e4SLinus Torvalds 		inode->i_size = 0;
11333889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1134779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1135779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1136779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1137779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1138779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1139779750d2SKirill A. Shutemov 			}
1140779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1141779750d2SKirill A. Shutemov 		}
1142af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1143af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1144af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1145af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1146cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1147af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1148af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11491da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1150cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11511da177e4SLinus Torvalds 		}
11523ed47db3SAl Viro 	}
1153b09e0fa4SEric Paris 
115438f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11550f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11565b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1157dbd5768fSJan Kara 	clear_inode(inode);
11581da177e4SLinus Torvalds }
11591da177e4SLinus Torvalds 
1160b56a2d8aSVineeth Remanan Pillai extern struct swap_info_struct *swap_info[];
1161b56a2d8aSVineeth Remanan Pillai 
1162b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1163b56a2d8aSVineeth Remanan Pillai 				   pgoff_t start, unsigned int nr_entries,
1164b56a2d8aSVineeth Remanan Pillai 				   struct page **entries, pgoff_t *indices,
116587039546SHugh Dickins 				   unsigned int type, bool frontswap)
1166478922e2SMatthew Wilcox {
1167b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1168b56a2d8aSVineeth Remanan Pillai 	struct page *page;
116987039546SHugh Dickins 	swp_entry_t entry;
1170b56a2d8aSVineeth Remanan Pillai 	unsigned int ret = 0;
1171b56a2d8aSVineeth Remanan Pillai 
1172b56a2d8aSVineeth Remanan Pillai 	if (!nr_entries)
1173b56a2d8aSVineeth Remanan Pillai 		return 0;
1174478922e2SMatthew Wilcox 
1175478922e2SMatthew Wilcox 	rcu_read_lock();
1176b56a2d8aSVineeth Remanan Pillai 	xas_for_each(&xas, page, ULONG_MAX) {
1177b56a2d8aSVineeth Remanan Pillai 		if (xas_retry(&xas, page))
11785b9c98f3SMike Kravetz 			continue;
1179b56a2d8aSVineeth Remanan Pillai 
1180b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1181478922e2SMatthew Wilcox 			continue;
1182b56a2d8aSVineeth Remanan Pillai 
118387039546SHugh Dickins 		entry = radix_to_swp_entry(page);
118487039546SHugh Dickins 		if (swp_type(entry) != type)
1185b56a2d8aSVineeth Remanan Pillai 			continue;
118687039546SHugh Dickins 		if (frontswap &&
118787039546SHugh Dickins 		    !frontswap_test(swap_info[type], swp_offset(entry)))
118887039546SHugh Dickins 			continue;
1189b56a2d8aSVineeth Remanan Pillai 
1190b56a2d8aSVineeth Remanan Pillai 		indices[ret] = xas.xa_index;
1191b56a2d8aSVineeth Remanan Pillai 		entries[ret] = page;
1192b56a2d8aSVineeth Remanan Pillai 
1193b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1194e21a2955SMatthew Wilcox 			xas_pause(&xas);
1195478922e2SMatthew Wilcox 			cond_resched_rcu();
1196478922e2SMatthew Wilcox 		}
1197b56a2d8aSVineeth Remanan Pillai 		if (++ret == nr_entries)
1198b56a2d8aSVineeth Remanan Pillai 			break;
1199b56a2d8aSVineeth Remanan Pillai 	}
1200478922e2SMatthew Wilcox 	rcu_read_unlock();
1201e21a2955SMatthew Wilcox 
1202b56a2d8aSVineeth Remanan Pillai 	return ret;
1203b56a2d8aSVineeth Remanan Pillai }
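
/*
 * Swapped-out pages occupy the same xarray slots as struct page
 * pointers, distinguished by the value-entry tag tested with
 * xa_is_value() above.  A sketch of the decode step, given one such
 * entry:
 *
 *	swp_entry_t entry = radix_to_swp_entry(page);
 *	unsigned int type = swp_type(entry);	(which swap device)
 *	pgoff_t offset = swp_offset(entry);	(slot on that device)
 *
 * Only entries whose type matches the device being swapped off (and,
 * for frontswap, whose offset tests positive in frontswap_test()) are
 * collected into the caller's arrays.
 */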
1204b56a2d8aSVineeth Remanan Pillai 
1205b56a2d8aSVineeth Remanan Pillai /*
1206b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1207b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1208b56a2d8aSVineeth Remanan Pillai  */
1209b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1210b56a2d8aSVineeth Remanan Pillai 				    pgoff_t *indices)
1211b56a2d8aSVineeth Remanan Pillai {
1212b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1213b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1214b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1215b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1216b56a2d8aSVineeth Remanan Pillai 
1217b56a2d8aSVineeth Remanan Pillai 	for (i = 0; i < pvec.nr; i++) {
1218b56a2d8aSVineeth Remanan Pillai 		struct page *page = pvec.pages[i];
1219b56a2d8aSVineeth Remanan Pillai 
1220b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1221b56a2d8aSVineeth Remanan Pillai 			continue;
1222b56a2d8aSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, indices[i],
1223b56a2d8aSVineeth Remanan Pillai 					  &page, SGP_CACHE,
1224b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1225b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1226b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1227b56a2d8aSVineeth Remanan Pillai 			unlock_page(page);
1228b56a2d8aSVineeth Remanan Pillai 			put_page(page);
1229b56a2d8aSVineeth Remanan Pillai 			ret++;
1230b56a2d8aSVineeth Remanan Pillai 		}
1231b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1232b56a2d8aSVineeth Remanan Pillai 			break;
1233b56a2d8aSVineeth Remanan Pillai 		error = 0;
1234b56a2d8aSVineeth Remanan Pillai 	}
1235b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1236478922e2SMatthew Wilcox }
1237478922e2SMatthew Wilcox 
123846f65ec1SHugh Dickins /*
123946f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
124046f65ec1SHugh Dickins  */
1241b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1242b56a2d8aSVineeth Remanan Pillai 			     bool frontswap, unsigned long *fs_pages_to_unuse)
12431da177e4SLinus Torvalds {
1244b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1245b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1246b56a2d8aSVineeth Remanan Pillai 	struct pagevec pvec;
1247b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1248b56a2d8aSVineeth Remanan Pillai 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1249b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12501da177e4SLinus Torvalds 
1251b56a2d8aSVineeth Remanan Pillai 	pagevec_init(&pvec);
1252b56a2d8aSVineeth Remanan Pillai 	do {
1253b56a2d8aSVineeth Remanan Pillai 		unsigned int nr_entries = PAGEVEC_SIZE;
12542e0e26c7SHugh Dickins 
1255b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1256b56a2d8aSVineeth Remanan Pillai 			nr_entries = *fs_pages_to_unuse;
12572e0e26c7SHugh Dickins 
1258b56a2d8aSVineeth Remanan Pillai 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1259b56a2d8aSVineeth Remanan Pillai 						  pvec.pages, indices,
126087039546SHugh Dickins 						  type, frontswap);
1261b56a2d8aSVineeth Remanan Pillai 		if (pvec.nr == 0) {
1262b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1263778dd893SHugh Dickins 			break;
1264b56a2d8aSVineeth Remanan Pillai 		}
1265b56a2d8aSVineeth Remanan Pillai 
1266b56a2d8aSVineeth Remanan Pillai 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1267b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1268b56a2d8aSVineeth Remanan Pillai 			break;
1269b56a2d8aSVineeth Remanan Pillai 
1270b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial) {
1271b56a2d8aSVineeth Remanan Pillai 			*fs_pages_to_unuse -= ret;
1272b56a2d8aSVineeth Remanan Pillai 			if (*fs_pages_to_unuse == 0) {
1273b56a2d8aSVineeth Remanan Pillai 				ret = FRONTSWAP_PAGES_UNUSED;
1274b56a2d8aSVineeth Remanan Pillai 				break;
1275b56a2d8aSVineeth Remanan Pillai 			}
1276b56a2d8aSVineeth Remanan Pillai 		}
1277b56a2d8aSVineeth Remanan Pillai 
1278b56a2d8aSVineeth Remanan Pillai 		start = indices[pvec.nr - 1];
1279b56a2d8aSVineeth Remanan Pillai 	} while (true);
1280b56a2d8aSVineeth Remanan Pillai 
1281b56a2d8aSVineeth Remanan Pillai 	return ret;
1282b56a2d8aSVineeth Remanan Pillai }
1283b56a2d8aSVineeth Remanan Pillai 
1284b56a2d8aSVineeth Remanan Pillai /*
1285b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1286b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1287b56a2d8aSVineeth Remanan Pillai  * unused.
1288b56a2d8aSVineeth Remanan Pillai  */
1289b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
1290b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
1291b56a2d8aSVineeth Remanan Pillai {
1292b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1293b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1294b56a2d8aSVineeth Remanan Pillai 
1295b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1296b56a2d8aSVineeth Remanan Pillai 		return 0;
1297b56a2d8aSVineeth Remanan Pillai 
1298b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1299b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1300b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1301b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1302b56a2d8aSVineeth Remanan Pillai 			continue;
1303b56a2d8aSVineeth Remanan Pillai 		}
1304af53d3e9SHugh Dickins 		/*
1305af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1306af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1307af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1308af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1309af53d3e9SHugh Dickins 		 */
1310af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1311b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1312b56a2d8aSVineeth Remanan Pillai 
1313af53d3e9SHugh Dickins 		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1314b56a2d8aSVineeth Remanan Pillai 					  fs_pages_to_unuse);
1315b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1316b56a2d8aSVineeth Remanan Pillai 
1317b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1318b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1319b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1320b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1321af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1322af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1323b56a2d8aSVineeth Remanan Pillai 		if (error)
1324b56a2d8aSVineeth Remanan Pillai 			break;
13251da177e4SLinus Torvalds 	}
1326cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1327778dd893SHugh Dickins 
1328778dd893SHugh Dickins 	return error;
13291da177e4SLinus Torvalds }
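
/*
 * Sketch of the eviction handshake used above, summarizing the code in
 * this file: shmem_unuse() cannot hold shmem_swaplist_mutex while it
 * swaps an inode's pages back in, so before dropping the mutex it bumps
 * info->stop_eviction; shmem_evict_inode() waits in wait_var_event()
 * until the count falls back to zero.  That pins the inode and its
 * swaplist link without a reference (igrab() could not help across
 * unmount):
 *
 *	swapoff side				eviction side
 *	atomic_inc(&info->stop_eviction)
 *	mutex_unlock(&shmem_swaplist_mutex)	wait_var_event(...)
 *	shmem_unuse_inode(...)
 *	mutex_lock(&shmem_swaplist_mutex)
 *	atomic_dec_and_test() / wake_up_var()	rechecks, then evicts
 */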
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds /*
13321da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13331da177e4SLinus Torvalds  */
13341da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13351da177e4SLinus Torvalds {
13361da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13371da177e4SLinus Torvalds 	struct address_space *mapping;
13381da177e4SLinus Torvalds 	struct inode *inode;
13396922c0c7SHugh Dickins 	swp_entry_t swap;
13406922c0c7SHugh Dickins 	pgoff_t index;
13411da177e4SLinus Torvalds 
1342800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
13431da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13441da177e4SLinus Torvalds 	mapping = page->mapping;
13451da177e4SLinus Torvalds 	index = page->index;
13461da177e4SLinus Torvalds 	inode = mapping->host;
13471da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13481da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13491da177e4SLinus Torvalds 		goto redirty;
1350d9fe526aSHugh Dickins 	if (!total_swap_pages)
13511da177e4SLinus Torvalds 		goto redirty;
13521da177e4SLinus Torvalds 
1353d9fe526aSHugh Dickins 	/*
135497b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
135597b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
135697b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
135797b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
135897b713baSChristoph Hellwig 	 * threads or sync.
1359d9fe526aSHugh Dickins 	 */
136048f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
136148f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
136248f170fbSHugh Dickins 		goto redirty;
136348f170fbSHugh Dickins 	}
13641635f6a7SHugh Dickins 
13651635f6a7SHugh Dickins 	/*
13661635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13671635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13681635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13691aac1400SHugh Dickins 	 *
13701aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13711aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13721aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13731aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13741aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13751635f6a7SHugh Dickins 	 */
13761635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13771aac1400SHugh Dickins 		if (inode->i_private) {
13781aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13791aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13801aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13811aac1400SHugh Dickins 			if (shmem_falloc &&
13828e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13831aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13841aac1400SHugh Dickins 			    index < shmem_falloc->next)
13851aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13861aac1400SHugh Dickins 			else
13871aac1400SHugh Dickins 				shmem_falloc = NULL;
13881aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
13891aac1400SHugh Dickins 			if (shmem_falloc)
13901aac1400SHugh Dickins 				goto redirty;
13911aac1400SHugh Dickins 		}
13921635f6a7SHugh Dickins 		clear_highpage(page);
13931635f6a7SHugh Dickins 		flush_dcache_page(page);
13941635f6a7SHugh Dickins 		SetPageUptodate(page);
13951635f6a7SHugh Dickins 	}
13961635f6a7SHugh Dickins 
139738d8b4e6SHuang Ying 	swap = get_swap_page(page);
139848f170fbSHugh Dickins 	if (!swap.val)
139948f170fbSHugh Dickins 		goto redirty;
1400d9fe526aSHugh Dickins 
1401b1dea800SHugh Dickins 	/*
1402b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
14036922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
14046922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1405b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
14066922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
14076922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1408b1dea800SHugh Dickins 	 */
1409b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
141005bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1411b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1412b1dea800SHugh Dickins 
14134afab1cdSYang Shi 	if (add_to_swap_cache(page, swap,
14143852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
14153852f676SJoonsoo Kim 			NULL) == 0) {
14164595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1417267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1418267a4c76SHugh Dickins 		info->swapped++;
14194595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1420267a4c76SHugh Dickins 
1421aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14226922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
14236922c0c7SHugh Dickins 
14246922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1425d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
14269fab5619SHugh Dickins 		swap_writepage(page, wbc);
14271da177e4SLinus Torvalds 		return 0;
14281da177e4SLinus Torvalds 	}
14291da177e4SLinus Torvalds 
14306922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
143175f6d6d2SMinchan Kim 	put_swap_page(page, swap);
14321da177e4SLinus Torvalds redirty:
14331da177e4SLinus Torvalds 	set_page_dirty(page);
1434d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1435d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1436d9fe526aSHugh Dickins 	unlock_page(page);
1437d9fe526aSHugh Dickins 	return 0;
14381da177e4SLinus Torvalds }
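
/*
 * The successful path above, in order (a summary of this function, not
 * new behaviour): allocate a swap slot with get_swap_page(); link the
 * inode into shmem_swaplist while the page lock still pins the inode;
 * add_to_swap_cache(); bump info->swapped under info->lock; replace
 * the page-cache slot with a swap entry via
 * shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); then
 * swap_writepage() starts the I/O.  Every failure falls through to
 * redirtying the page so that reclaim can retry later.
 */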
14391da177e4SLinus Torvalds 
144075edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
144171fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1442680d794bSakpm@linux-foundation.org {
1443680d794bSakpm@linux-foundation.org 	char buffer[64];
1444680d794bSakpm@linux-foundation.org 
144571fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1446095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1447095f1fc4SLee Schermerhorn 
1448a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1449095f1fc4SLee Schermerhorn 
1450095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1451680d794bSakpm@linux-foundation.org }
145271fe804bSLee Schermerhorn 
145371fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
145471fe804bSLee Schermerhorn {
145571fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
145671fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
1457*bf11b9a8SSebastian Andrzej Siewior 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
145871fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
145971fe804bSLee Schermerhorn 		mpol_get(mpol);
1460*bf11b9a8SSebastian Andrzej Siewior 		raw_spin_unlock(&sbinfo->stat_lock);
146171fe804bSLee Schermerhorn 	}
146271fe804bSLee Schermerhorn 	return mpol;
146371fe804bSLee Schermerhorn }
146475edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
146575edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
146675edd345SHugh Dickins {
146775edd345SHugh Dickins }
146875edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
146975edd345SHugh Dickins {
147075edd345SHugh Dickins 	return NULL;
147175edd345SHugh Dickins }
147275edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
147375edd345SHugh Dickins #ifndef CONFIG_NUMA
147475edd345SHugh Dickins #define vm_policy vm_private_data
147575edd345SHugh Dickins #endif
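
/*
 * Without CONFIG_NUMA, struct vm_area_struct carries no vm_policy
 * member, so the #define above aliases it to the (otherwise unused
 * here) vm_private_data slot.  That lets the pseudo-vma helpers below
 * compile identically on NUMA and non-NUMA configurations.
 */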
1476680d794bSakpm@linux-foundation.org 
1477800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1478800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1479800d8c63SKirill A. Shutemov {
1480800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14812c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1482800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1483800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1484800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1485800d8c63SKirill A. Shutemov }
1486800d8c63SKirill A. Shutemov 
1487800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1488800d8c63SKirill A. Shutemov {
1489800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1490800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1491800d8c63SKirill A. Shutemov }
1492800d8c63SKirill A. Shutemov 
149341ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
149441ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
14951da177e4SLinus Torvalds {
14961da177e4SLinus Torvalds 	struct vm_area_struct pvma;
149718a2f371SMel Gorman 	struct page *page;
14988c63ca5bSWill Deacon 	struct vm_fault vmf = {
14998c63ca5bSWill Deacon 		.vma = &pvma,
15008c63ca5bSWill Deacon 	};
15011da177e4SLinus Torvalds 
1502800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1503e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1504800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
150518a2f371SMel Gorman 
1506800d8c63SKirill A. Shutemov 	return page;
1507800d8c63SKirill A. Shutemov }
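
/*
 * The pseudo-vma pattern above in one place: swap readahead and the
 * page allocator derive a mempolicy from a vma, so shmem fabricates a
 * throwaway one on the stack:
 *
 *	struct vm_area_struct pvma;
 *	shmem_pseudo_vma_init(&pvma, info, index);	(takes mpol ref)
 *	page = alloc_page_vma(gfp, &pvma, 0);
 *	shmem_pseudo_vma_destroy(&pvma);		(drops mpol ref)
 *
 * The destroy step is not optional: mpol_shared_policy_lookup()
 * returned a referenced policy, and mpol_cond_put() balances it.
 */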
150818a2f371SMel Gorman 
150978cc8cdcSRik van Riel /*
151078cc8cdcSRik van Riel  * Make sure huge_gfp is always more limited than limit_gfp.
151178cc8cdcSRik van Riel  * Some of the flags set permissions, while others set limitations.
151278cc8cdcSRik van Riel  */
151378cc8cdcSRik van Riel static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
151478cc8cdcSRik van Riel {
151578cc8cdcSRik van Riel 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
151678cc8cdcSRik van Riel 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1517187df5ddSRik van Riel 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1518187df5ddSRik van Riel 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1519187df5ddSRik van Riel 
1520187df5ddSRik van Riel 	/* Allow allocations only from the originally specified zones. */
1521187df5ddSRik van Riel 	result |= zoneflags;
152278cc8cdcSRik van Riel 
152378cc8cdcSRik van Riel 	/*
152478cc8cdcSRik van Riel 	 * Minimize the result gfp by taking the union with the deny flags,
152578cc8cdcSRik van Riel 	 * and the intersection of the allow flags.
152678cc8cdcSRik van Riel 	 */
152778cc8cdcSRik van Riel 	result |= (limit_gfp & denyflags);
152878cc8cdcSRik van Riel 	result |= (huge_gfp & limit_gfp) & allowflags;
152978cc8cdcSRik van Riel 
153078cc8cdcSRik van Riel 	return result;
153178cc8cdcSRik van Riel }
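
/*
 * Worked example for the combining rules above (flag values
 * illustrative): suppose huge_gfp contains __GFP_FS but limit_gfp does
 * not, and limit_gfp contains __GFP_NORETRY.  __GFP_FS is an "allow"
 * flag, surviving only via (huge_gfp & limit_gfp) & allowflags, so it
 * is dropped; __GFP_NORETRY is a "deny" flag OR'ed in from limit_gfp,
 * so it is gained; and the zone bits come from limit_gfp alone.  The
 * result can never reclaim harder, or reach into more zones, than
 * limit_gfp permits.
 */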
153278cc8cdcSRik van Riel 
1533800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1534800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1535800d8c63SKirill A. Shutemov {
1536800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15377b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15387b8d046fSMatthew Wilcox 	pgoff_t hindex;
1539800d8c63SKirill A. Shutemov 	struct page *page;
1540800d8c63SKirill A. Shutemov 
15414620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15427b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15437b8d046fSMatthew Wilcox 								XA_PRESENT))
1544800d8c63SKirill A. Shutemov 		return NULL;
1545800d8c63SKirill A. Shutemov 
1546800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1547164cc4feSRik van Riel 	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(),
1548164cc4feSRik van Riel 			       true);
1549800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1550800d8c63SKirill A. Shutemov 	if (page)
1551800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
1552dcdf11eeSDavid Rientjes 	else
1553dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
155418a2f371SMel Gorman 	return page;
155518a2f371SMel Gorman }
155618a2f371SMel Gorman 
155718a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
155818a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
155918a2f371SMel Gorman {
156018a2f371SMel Gorman 	struct vm_area_struct pvma;
156118a2f371SMel Gorman 	struct page *page;
156218a2f371SMel Gorman 
1563800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1564800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1565800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
156618a2f371SMel Gorman 
1567800d8c63SKirill A. Shutemov 	return page;
1568800d8c63SKirill A. Shutemov }
1569800d8c63SKirill A. Shutemov 
1570800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
15710f079694SMike Rapoport 		struct inode *inode,
1572800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1573800d8c63SKirill A. Shutemov {
15740f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1575800d8c63SKirill A. Shutemov 	struct page *page;
1576800d8c63SKirill A. Shutemov 	int nr;
1577800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1578800d8c63SKirill A. Shutemov 
1579396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1580800d8c63SKirill A. Shutemov 		huge = false;
1581800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1582800d8c63SKirill A. Shutemov 
15830f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1584800d8c63SKirill A. Shutemov 		goto failed;
1585800d8c63SKirill A. Shutemov 
1586800d8c63SKirill A. Shutemov 	if (huge)
1587800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1588800d8c63SKirill A. Shutemov 	else
1589800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
159075edd345SHugh Dickins 	if (page) {
159175edd345SHugh Dickins 		__SetPageLocked(page);
159275edd345SHugh Dickins 		__SetPageSwapBacked(page);
1593800d8c63SKirill A. Shutemov 		return page;
159475edd345SHugh Dickins 	}
159518a2f371SMel Gorman 
1596800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15970f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1598800d8c63SKirill A. Shutemov failed:
1599800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
16001da177e4SLinus Torvalds }
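
/*
 * Note the ordering above: the blocks are charged up front with
 * shmem_inode_acct_block(inode, nr), and only then is allocation
 * attempted; an allocation failure unwinds the charge with
 * shmem_inode_unacct_blocks(inode, nr).  Charging first makes a
 * size-limited tmpfs fail fast with -ENOSPC instead of allocating
 * pages it would have to throw away.
 */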
160171fe804bSLee Schermerhorn 
16021da177e4SLinus Torvalds /*
1603bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1604bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1605bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1606bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1607bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1608bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1609bde05d1cSHugh Dickins  *
1610bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1611bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1612bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1613bde05d1cSHugh Dickins  */
1614bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1615bde05d1cSHugh Dickins {
1616bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1617bde05d1cSHugh Dickins }
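
/*
 * Concrete case for the test above (gfp details illustrative): a
 * mapping like the gma500 GEM driver's sets a gfp mask with
 * __GFP_DMA32, so gfp_zone(gfp) == ZONE_DMA32; a swapcache page that
 * landed in ZONE_NORMAL then has page_zonenum(page) > gfp_zone(gfp),
 * and shmem_replace_page() below copies it into a page the mapping can
 * actually accept.
 */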
1618bde05d1cSHugh Dickins 
1619bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1620bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1621bde05d1cSHugh Dickins {
1622bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1623bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1624c1cb20d4SYu Zhao 	swp_entry_t entry;
1625bde05d1cSHugh Dickins 	pgoff_t swap_index;
1626bde05d1cSHugh Dickins 	int error;
1627bde05d1cSHugh Dickins 
1628bde05d1cSHugh Dickins 	oldpage = *pagep;
1629c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1630c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1631bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1632bde05d1cSHugh Dickins 
1633bde05d1cSHugh Dickins 	/*
1634bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1635bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1636bde05d1cSHugh Dickins 	 */
1637bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1638bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1639bde05d1cSHugh Dickins 	if (!newpage)
1640bde05d1cSHugh Dickins 		return -ENOMEM;
1641bde05d1cSHugh Dickins 
164209cbfeafSKirill A. Shutemov 	get_page(newpage);
1643bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
16440142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1645bde05d1cSHugh Dickins 
16469956edf3SHugh Dickins 	__SetPageLocked(newpage);
16479956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1648bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1649c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1650bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1651bde05d1cSHugh Dickins 
1652bde05d1cSHugh Dickins 	/*
1653bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1654bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1655bde05d1cSHugh Dickins 	 */
1656b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
165762f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
16580142ef6cSHugh Dickins 	if (!error) {
16590d1c2072SJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
16600d1c2072SJohannes Weiner 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
16610d1c2072SJohannes Weiner 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
16620142ef6cSHugh Dickins 	}
1663b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1664bde05d1cSHugh Dickins 
16650142ef6cSHugh Dickins 	if (unlikely(error)) {
16660142ef6cSHugh Dickins 		/*
16670142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16680142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16690142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16700142ef6cSHugh Dickins 		 */
16710142ef6cSHugh Dickins 		oldpage = newpage;
16720142ef6cSHugh Dickins 	} else {
16736058eaecSJohannes Weiner 		lru_cache_add(newpage);
16740142ef6cSHugh Dickins 		*pagep = newpage;
16750142ef6cSHugh Dickins 	}
1676bde05d1cSHugh Dickins 
1677bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1678bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1679bde05d1cSHugh Dickins 
1680bde05d1cSHugh Dickins 	unlock_page(oldpage);
168109cbfeafSKirill A. Shutemov 	put_page(oldpage);
168209cbfeafSKirill A. Shutemov 	put_page(oldpage);
16830142ef6cSHugh Dickins 	return error;
1684bde05d1cSHugh Dickins }
1685bde05d1cSHugh Dickins 
1686bde05d1cSHugh Dickins /*
1687c5bf121eSVineeth Remanan Pillai  * Swap in the page pointed to by *pagep.
1688c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *pagep contains a valid swapped page.
1689c5bf121eSVineeth Remanan Pillai  * Returns 0 with the page in *pagep on success. On failure, returns
1690af44c12fSRandy Dunlap  * the error code and sets *pagep to NULL.
16911da177e4SLinus Torvalds  */
1692c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1693c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
1694c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
16952b740303SSouptick Joarder 			     vm_fault_t *fault_type)
16961da177e4SLinus Torvalds {
16971da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
169823f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
169904f94e3fSDan Schatzberg 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1700b1e1ef34SYang Shi 	struct page *page;
17011da177e4SLinus Torvalds 	swp_entry_t swap;
17021da177e4SLinus Torvalds 	int error;
17031da177e4SLinus Torvalds 
1704c5bf121eSVineeth Remanan Pillai 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1705c5bf121eSVineeth Remanan Pillai 	swap = radix_to_swp_entry(*pagep);
1706c5bf121eSVineeth Remanan Pillai 	*pagep = NULL;
170754af6042SHugh Dickins 
17081da177e4SLinus Torvalds 	/* Look it up and read it in.. */
1709ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
171027ab7006SHugh Dickins 	if (!page) {
17119e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17129e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
171368da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17149e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17152262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17169e18eb29SAndres Lagar-Cavilla 		}
17179e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
171841ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
171927ab7006SHugh Dickins 		if (!page) {
17201da177e4SLinus Torvalds 			error = -ENOMEM;
172154af6042SHugh Dickins 			goto failed;
1722285b2c4fSHugh Dickins 		}
17231da177e4SLinus Torvalds 	}
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
172654af6042SHugh Dickins 	lock_page(page);
17270142ef6cSHugh Dickins 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1728d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1729c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1730d1899228SHugh Dickins 		goto unlock;
1731bde05d1cSHugh Dickins 	}
173227ab7006SHugh Dickins 	if (!PageUptodate(page)) {
17331da177e4SLinus Torvalds 		error = -EIO;
173454af6042SHugh Dickins 		goto failed;
173554af6042SHugh Dickins 	}
173654af6042SHugh Dickins 	wait_on_page_writeback(page);
173754af6042SHugh Dickins 
17388a84802eSSteven Price 	/*
17398a84802eSSteven Price 	 * Some architectures may have to restore extra metadata to the
17408a84802eSSteven Price 	 * physical page after reading from swap.
17418a84802eSSteven Price 	 */
17428a84802eSSteven Price 	arch_swap_restore(swap, page);
17438a84802eSSteven Price 
1744bde05d1cSHugh Dickins 	if (shmem_should_replace_page(page, gfp)) {
1745bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1746bde05d1cSHugh Dickins 		if (error)
174754af6042SHugh Dickins 			goto failed;
17481da177e4SLinus Torvalds 	}
17491da177e4SLinus Torvalds 
17503fea5a49SJohannes Weiner 	error = shmem_add_to_page_cache(page, mapping, index,
17513fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17523fea5a49SJohannes Weiner 					charge_mm);
175354af6042SHugh Dickins 	if (error)
175454af6042SHugh Dickins 		goto failed;
175554af6042SHugh Dickins 
17564595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
175754af6042SHugh Dickins 	info->swapped--;
175854af6042SHugh Dickins 	shmem_recalc_inode(inode);
17594595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
176027ab7006SHugh Dickins 
176166d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
176266d2f4d2SHugh Dickins 		mark_page_accessed(page);
176366d2f4d2SHugh Dickins 
176427ab7006SHugh Dickins 	delete_from_swap_cache(page);
176527ab7006SHugh Dickins 	set_page_dirty(page);
176627ab7006SHugh Dickins 	swap_free(swap);
176727ab7006SHugh Dickins 
1768c5bf121eSVineeth Remanan Pillai 	*pagep = page;
1769c5bf121eSVineeth Remanan Pillai 	return 0;
1770c5bf121eSVineeth Remanan Pillai failed:
1771c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1772c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1773c5bf121eSVineeth Remanan Pillai unlock:
1774c5bf121eSVineeth Remanan Pillai 	if (page) {
1775c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1776c5bf121eSVineeth Remanan Pillai 		put_page(page);
1777c5bf121eSVineeth Remanan Pillai 	}
1778c5bf121eSVineeth Remanan Pillai 
1779c5bf121eSVineeth Remanan Pillai 	return error;
1780c5bf121eSVineeth Remanan Pillai }
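
/*
 * The swapin path above, step by step (a summary of this function):
 * decode the swap entry; try lookup_swap_cache(), else start real I/O
 * via shmem_swapin(); lock the page and re-verify with
 * shmem_confirm_swap() that the mapping still holds this entry, so a
 * racing truncate or swapoff surfaces as -EEXIST for the caller to
 * retry; relocate with shmem_replace_page() if the zone is unsuitable;
 * insert into the page cache; then delete_from_swap_cache() and
 * swap_free() release the swap slot.
 */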
1781c5bf121eSVineeth Remanan Pillai 
1782c5bf121eSVineeth Remanan Pillai /*
1783c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1784c5bf121eSVineeth Remanan Pillai  *
1785c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1786c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty, because we also free the swap
1787c5bf121eSVineeth Remanan Pillai  * entry: a page cannot live in both swap and the page cache.
1788c5bf121eSVineeth Remanan Pillai  *
1789c949b097SAxel Rasmussen  * vma, vmf, and fault_type are only supplied by shmem_fault:
1790c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1791c5bf121eSVineeth Remanan Pillai  */
1792c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1793c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1794c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1795c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1796c5bf121eSVineeth Remanan Pillai {
1797c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1798c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1799c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1800c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1801c5bf121eSVineeth Remanan Pillai 	struct page *page;
1802c5bf121eSVineeth Remanan Pillai 	enum sgp_type sgp_huge = sgp;
1803c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1804164cc4feSRik van Riel 	gfp_t huge_gfp;
1805c5bf121eSVineeth Remanan Pillai 	int error;
1806c5bf121eSVineeth Remanan Pillai 	int once = 0;
1807c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1808c5bf121eSVineeth Remanan Pillai 
1809c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1810c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1811c5bf121eSVineeth Remanan Pillai 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1812c5bf121eSVineeth Remanan Pillai 		sgp = SGP_CACHE;
1813c5bf121eSVineeth Remanan Pillai repeat:
1814c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1815c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1816c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1817c5bf121eSVineeth Remanan Pillai 	}
1818c5bf121eSVineeth Remanan Pillai 
1819c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
182004f94e3fSDan Schatzberg 	charge_mm = vma ? vma->vm_mm : NULL;
1821c5bf121eSVineeth Remanan Pillai 
182244835d20SMatthew Wilcox (Oracle) 	page = pagecache_get_page(mapping, index,
182344835d20SMatthew Wilcox (Oracle) 					FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1824c949b097SAxel Rasmussen 
1825c949b097SAxel Rasmussen 	if (page && vma && userfaultfd_minor(vma)) {
1826c949b097SAxel Rasmussen 		if (!xa_is_value(page)) {
1827c949b097SAxel Rasmussen 			unlock_page(page);
1828c949b097SAxel Rasmussen 			put_page(page);
1829c949b097SAxel Rasmussen 		}
1830c949b097SAxel Rasmussen 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1831c949b097SAxel Rasmussen 		return 0;
1832c949b097SAxel Rasmussen 	}
1833c949b097SAxel Rasmussen 
1834c5bf121eSVineeth Remanan Pillai 	if (xa_is_value(page)) {
1835c5bf121eSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, index, &page,
1836c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1837c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1838c5bf121eSVineeth Remanan Pillai 			goto repeat;
1839c5bf121eSVineeth Remanan Pillai 
1840c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1841c5bf121eSVineeth Remanan Pillai 		return error;
1842c5bf121eSVineeth Remanan Pillai 	}
1843c5bf121eSVineeth Remanan Pillai 
184463ec1973SMatthew Wilcox (Oracle) 	if (page)
184563ec1973SMatthew Wilcox (Oracle) 		hindex = page->index;
1846c5bf121eSVineeth Remanan Pillai 	if (page && sgp == SGP_WRITE)
1847c5bf121eSVineeth Remanan Pillai 		mark_page_accessed(page);
1848c5bf121eSVineeth Remanan Pillai 
1849c5bf121eSVineeth Remanan Pillai 	/* fallocated page? */
1850c5bf121eSVineeth Remanan Pillai 	if (page && !PageUptodate(page)) {
1851c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1852c5bf121eSVineeth Remanan Pillai 			goto clear;
1853c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1854c5bf121eSVineeth Remanan Pillai 		put_page(page);
1855c5bf121eSVineeth Remanan Pillai 		page = NULL;
185663ec1973SMatthew Wilcox (Oracle) 		hindex = index;
1857c5bf121eSVineeth Remanan Pillai 	}
185863ec1973SMatthew Wilcox (Oracle) 	if (page || sgp == SGP_READ)
185963ec1973SMatthew Wilcox (Oracle) 		goto out;
1860c5bf121eSVineeth Remanan Pillai 
1861c5bf121eSVineeth Remanan Pillai 	/*
1862c5bf121eSVineeth Remanan Pillai 	 * Fast cache lookup did not find it:
1863c5bf121eSVineeth Remanan Pillai 	 * bring it back from swap or allocate.
1864c5bf121eSVineeth Remanan Pillai 	 */
1865c5bf121eSVineeth Remanan Pillai 
1866cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1867cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1868cfda0526SMike Rapoport 		return 0;
1869cfda0526SMike Rapoport 	}
1870cfda0526SMike Rapoport 
1871800d8c63SKirill A. Shutemov 	/* shmem_symlink() */
187230e6a51dSHui Su 	if (!shmem_mapping(mapping))
1873800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1874657e3038SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1875800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1876800d8c63SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
1877800d8c63SKirill A. Shutemov 		goto alloc_huge;
1878800d8c63SKirill A. Shutemov 	switch (sbinfo->huge) {
1879800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
1880800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
188127d80fa2SKees Cook 	case SHMEM_HUGE_WITHIN_SIZE: {
188227d80fa2SKees Cook 		loff_t i_size;
188327d80fa2SKees Cook 		pgoff_t off;
188427d80fa2SKees Cook 
1885800d8c63SKirill A. Shutemov 		off = round_up(index, HPAGE_PMD_NR);
1886800d8c63SKirill A. Shutemov 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1887800d8c63SKirill A. Shutemov 		if (i_size >= HPAGE_PMD_SIZE &&
1888800d8c63SKirill A. Shutemov 		    i_size >> PAGE_SHIFT >= off)
1889800d8c63SKirill A. Shutemov 			goto alloc_huge;
189027d80fa2SKees Cook 
189127d80fa2SKees Cook 		fallthrough;
189227d80fa2SKees Cook 	}
1893800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
1894657e3038SKirill A. Shutemov 		if (sgp_huge == SGP_HUGE)
1895657e3038SKirill A. Shutemov 			goto alloc_huge;
1896657e3038SKirill A. Shutemov 		/* TODO: implement fadvise() hints */
1897800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
189859a16eadSHugh Dickins 	}
18991da177e4SLinus Torvalds 
1900800d8c63SKirill A. Shutemov alloc_huge:
1901164cc4feSRik van Riel 	huge_gfp = vma_thp_gfp_mask(vma);
190278cc8cdcSRik van Riel 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1903164cc4feSRik van Riel 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1904800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1905c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1906c5bf121eSVineeth Remanan Pillai 		page = shmem_alloc_and_acct_page(gfp, inode,
1907800d8c63SKirill A. Shutemov 						 index, false);
190854af6042SHugh Dickins 	}
1909800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1910779750d2SKirill A. Shutemov 		int retry = 5;
1911c5bf121eSVineeth Remanan Pillai 
1912800d8c63SKirill A. Shutemov 		error = PTR_ERR(page);
1913800d8c63SKirill A. Shutemov 		page = NULL;
1914779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1915c5bf121eSVineeth Remanan Pillai 			goto unlock;
1916779750d2SKirill A. Shutemov 		/*
1917c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1918779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1919779750d2SKirill A. Shutemov 		 */
1920779750d2SKirill A. Shutemov 		while (retry--) {
1921779750d2SKirill A. Shutemov 			int ret;
1922c5bf121eSVineeth Remanan Pillai 
1923779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1924779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1925779750d2SKirill A. Shutemov 				break;
1926779750d2SKirill A. Shutemov 			if (ret)
1927779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1928779750d2SKirill A. Shutemov 		}
1929c5bf121eSVineeth Remanan Pillai 		goto unlock;
1930800d8c63SKirill A. Shutemov 	}
1931800d8c63SKirill A. Shutemov 
1932800d8c63SKirill A. Shutemov 	if (PageTransHuge(page))
1933800d8c63SKirill A. Shutemov 		hindex = round_down(index, HPAGE_PMD_NR);
1934800d8c63SKirill A. Shutemov 	else
1935800d8c63SKirill A. Shutemov 		hindex = index;
1936800d8c63SKirill A. Shutemov 
193766d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1938eb39d618SHugh Dickins 		__SetPageReferenced(page);
193966d2f4d2SHugh Dickins 
1940800d8c63SKirill A. Shutemov 	error = shmem_add_to_page_cache(page, mapping, hindex,
19413fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19423fea5a49SJohannes Weiner 					charge_mm);
19433fea5a49SJohannes Weiner 	if (error)
1944800d8c63SKirill A. Shutemov 		goto unacct;
19456058eaecSJohannes Weiner 	lru_cache_add(page);
194654af6042SHugh Dickins 
19474595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1948d8c6546bSMatthew Wilcox (Oracle) 	info->alloced += compound_nr(page);
1949800d8c63SKirill A. Shutemov 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
195054af6042SHugh Dickins 	shmem_recalc_inode(inode);
19514595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19521635f6a7SHugh Dickins 	alloced = true;
195354af6042SHugh Dickins 
1954779750d2SKirill A. Shutemov 	if (PageTransHuge(page) &&
1955779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1956779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1957779750d2SKirill A. Shutemov 		/*
1958779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1959779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1960779750d2SKirill A. Shutemov 		 */
1961779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1962d041353dSCong Wang 		/*
1963d041353dSCong Wang 		 * Use the _careful variant to defend against unlocked
1964d041353dSCong Wang 		 * access to ->shrink_list in shmem_unused_huge_shrink()
1965d041353dSCong Wang 		 */
1966d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1967779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1968779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1969779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1970779750d2SKirill A. Shutemov 		}
1971779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1972779750d2SKirill A. Shutemov 	}
1973779750d2SKirill A. Shutemov 
1974ec9516fbSHugh Dickins 	/*
19751635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19761635f6a7SHugh Dickins 	 */
19771635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19781635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19791635f6a7SHugh Dickins clear:
19801635f6a7SHugh Dickins 	/*
19811635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19821635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19831635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1984ec9516fbSHugh Dickins 	 */
1985800d8c63SKirill A. Shutemov 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1986800d8c63SKirill A. Shutemov 		int i;
1987800d8c63SKirill A. Shutemov 
198863ec1973SMatthew Wilcox (Oracle) 		for (i = 0; i < compound_nr(page); i++) {
198963ec1973SMatthew Wilcox (Oracle) 			clear_highpage(page + i);
199063ec1973SMatthew Wilcox (Oracle) 			flush_dcache_page(page + i);
1991800d8c63SKirill A. Shutemov 		}
199263ec1973SMatthew Wilcox (Oracle) 		SetPageUptodate(page);
1993ec9516fbSHugh Dickins 	}
1994bde05d1cSHugh Dickins 
199554af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
199675edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
199709cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1998267a4c76SHugh Dickins 		if (alloced) {
1999267a4c76SHugh Dickins 			ClearPageDirty(page);
2000267a4c76SHugh Dickins 			delete_from_page_cache(page);
20014595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
2002267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
20034595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
2004267a4c76SHugh Dickins 		}
200554af6042SHugh Dickins 		error = -EINVAL;
2006267a4c76SHugh Dickins 		goto unlock;
2007ff36b801SShaohua Li 	}
200863ec1973SMatthew Wilcox (Oracle) out:
2009800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
201054af6042SHugh Dickins 	return 0;
2011d00806b1SNick Piggin 
2012d0217ac0SNick Piggin 	/*
201354af6042SHugh Dickins 	 * Error recovery.
20141da177e4SLinus Torvalds 	 */
201554af6042SHugh Dickins unacct:
2016d8c6546bSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, compound_nr(page));
2017800d8c63SKirill A. Shutemov 
2018800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
2019800d8c63SKirill A. Shutemov 		unlock_page(page);
2020800d8c63SKirill A. Shutemov 		put_page(page);
2021800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2022800d8c63SKirill A. Shutemov 	}
2023d1899228SHugh Dickins unlock:
202427ab7006SHugh Dickins 	if (page) {
202554af6042SHugh Dickins 		unlock_page(page);
202609cbfeafSKirill A. Shutemov 		put_page(page);
202754af6042SHugh Dickins 	}
202854af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20294595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
203054af6042SHugh Dickins 		shmem_recalc_inode(inode);
20314595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20321da177e4SLinus Torvalds 		goto repeat;
2033d8dc74f2SAdrian Bunk 	}
20347f4446eeSMatthew Wilcox 	if (error == -EEXIST)
203554af6042SHugh Dickins 		goto repeat;
203654af6042SHugh Dickins 	return error;
20371da177e4SLinus Torvalds }
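
/*
 * Typical non-fault caller pattern (a sketch derived from the
 * signature above; vma, vmf and fault_type are NULL outside the fault
 * path, and SGP_CACHE always returns a page on success):
 *
 *	struct page *page;
 *	int err = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
 *				    mapping_gfp_mask(inode->i_mapping),
 *				    NULL, NULL, NULL);
 *	if (!err) {
 *		... page is returned locked and with a reference ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */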
20381da177e4SLinus Torvalds 
203910d20bd2SLinus Torvalds /*
204010d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
204110d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
204210d20bd2SLinus Torvalds  * target.
204310d20bd2SLinus Torvalds  */
2044ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
204510d20bd2SLinus Torvalds {
204610d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20472055da97SIngo Molnar 	list_del_init(&wait->entry);
204810d20bd2SLinus Torvalds 	return ret;
204910d20bd2SLinus Torvalds }
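
/*
 * The unconditional list_del_init() above is what makes the on-stack
 * wait queue used by the hole-punch path safe: once the punching task
 * has issued its wake_up_all() and returned, its stack-allocated wait
 * queue head is gone, so no woken waiter may remain queued there to be
 * touched later.  See the finish_wait() comment in shmem_fault() below.
 */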
205010d20bd2SLinus Torvalds 
205120acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20521da177e4SLinus Torvalds {
205311bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2054496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20559e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2056657e3038SKirill A. Shutemov 	enum sgp_type sgp;
205720acce67SSouptick Joarder 	int err;
205820acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20591da177e4SLinus Torvalds 
2060f00cdc6dSHugh Dickins 	/*
2061f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2062f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
2063f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
20648e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20658e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20668e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20678e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20688e205f77SHugh Dickins 	 *
20698e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20708e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20718e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20728e205f77SHugh Dickins 	 *
20738e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20748e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
20758e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2076f00cdc6dSHugh Dickins 	 */
2077f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2078f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2079f00cdc6dSHugh Dickins 
2080f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2081f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20828e205f77SHugh Dickins 		if (shmem_falloc &&
20838e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20848e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20858e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20868897c1b1SKirill A. Shutemov 			struct file *fpin;
20878e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
208810d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20898e205f77SHugh Dickins 
20908e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20918897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20928897c1b1SKirill A. Shutemov 			if (fpin)
20938e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20948e205f77SHugh Dickins 
20958e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20968e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20978e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20988e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20998e205f77SHugh Dickins 			schedule();
21008e205f77SHugh Dickins 
21018e205f77SHugh Dickins 			/*
21028e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
21038e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
21048e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
21058e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
21068e205f77SHugh Dickins 			 * though i_lock is still needed to avoid racing with wake_up_all().
21078e205f77SHugh Dickins 			 */
21088e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
21098e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
21108e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
21118897c1b1SKirill A. Shutemov 
21128897c1b1SKirill A. Shutemov 			if (fpin)
21138897c1b1SKirill A. Shutemov 				fput(fpin);
21148e205f77SHugh Dickins 			return ret;
2115f00cdc6dSHugh Dickins 		}
21168e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2117f00cdc6dSHugh Dickins 	}
2118f00cdc6dSHugh Dickins 
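	/*
	 * Map the per-VMA THP hints onto an sgp_type: VM_NOHUGEPAGE (or a
	 * process-wide MMF_DISABLE_THP) forbids a huge page here, VM_HUGEPAGE
	 * asks for one, and plain SGP_CACHE defers to the mount's huge= policy.
	 */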
2119657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
212018600332SMichal Hocko 
212118600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
212218600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2123657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
212418600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
212518600332SMichal Hocko 		sgp = SGP_HUGE;
2126657e3038SKirill A. Shutemov 
212720acce67SSouptick Joarder 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2128cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
212920acce67SSouptick Joarder 	if (err)
213020acce67SSouptick Joarder 		return vmf_error(err);
213168da9f05SHugh Dickins 	return ret;
21321da177e4SLinus Torvalds }
21331da177e4SLinus Torvalds 
2134c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2135c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2136c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2137c01d5b30SHugh Dickins {
2138c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2139c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2140c01d5b30SHugh Dickins 	unsigned long addr;
2141c01d5b30SHugh Dickins 	unsigned long offset;
2142c01d5b30SHugh Dickins 	unsigned long inflated_len;
2143c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2144c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2145c01d5b30SHugh Dickins 
2146c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2147c01d5b30SHugh Dickins 		return -ENOMEM;
2148c01d5b30SHugh Dickins 
2149c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2150c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2151c01d5b30SHugh Dickins 
2152396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2153c01d5b30SHugh Dickins 		return addr;
2154c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2155c01d5b30SHugh Dickins 		return addr;
2156c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2157c01d5b30SHugh Dickins 		return addr;
2158c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2159c01d5b30SHugh Dickins 		return addr;
2160c01d5b30SHugh Dickins 
2161c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2162c01d5b30SHugh Dickins 		return addr;
2163c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2164c01d5b30SHugh Dickins 		return addr;
2165c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2166c01d5b30SHugh Dickins 		return addr;
2167c01d5b30SHugh Dickins 	/*
2168c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2169c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
217099158997SKirill A. Shutemov 	 * But if caller specified an address hint and we allocated area there
217199158997SKirill A. Shutemov 	 * successfully, respect that as before.
2172c01d5b30SHugh Dickins 	 */
217399158997SKirill A. Shutemov 	if (uaddr == addr)
2174c01d5b30SHugh Dickins 		return addr;
2175c01d5b30SHugh Dickins 
2176c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2177c01d5b30SHugh Dickins 		struct super_block *sb;
2178c01d5b30SHugh Dickins 
2179c01d5b30SHugh Dickins 		if (file) {
2180c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2181c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2182c01d5b30SHugh Dickins 		} else {
2183c01d5b30SHugh Dickins 			/*
2184c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2185c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2186c01d5b30SHugh Dickins 			 */
2187c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2188c01d5b30SHugh Dickins 				return addr;
2189c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2190c01d5b30SHugh Dickins 		}
21913089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2192c01d5b30SHugh Dickins 			return addr;
2193c01d5b30SHugh Dickins 	}
2194c01d5b30SHugh Dickins 
2195c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2196c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2197c01d5b30SHugh Dickins 		return addr;
2198c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2199c01d5b30SHugh Dickins 		return addr;
2200c01d5b30SHugh Dickins 
2201c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2202c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2203c01d5b30SHugh Dickins 		return addr;
2204c01d5b30SHugh Dickins 	if (inflated_len < len)
2205c01d5b30SHugh Dickins 		return addr;
2206c01d5b30SHugh Dickins 
220799158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2208c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2209c01d5b30SHugh Dickins 		return addr;
2210c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2211c01d5b30SHugh Dickins 		return addr;
2212c01d5b30SHugh Dickins 
2213c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2214c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2215c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2216c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2217c01d5b30SHugh Dickins 
2218c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2219c01d5b30SHugh Dickins 		return addr;
2220c01d5b30SHugh Dickins 	return inflated_addr;
2221c01d5b30SHugh Dickins }
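/*
 * Illustration of the arithmetic above, assuming 4K pages and 2M huge pages
 * (HPAGE_PMD_SIZE == 2M): a 4M mapping whose pgoff lands 1M into a huge page
 * has offset == 1M, so we ask for inflated_len == 4M + 2M - 4K, then slide
 * the returned address up to the first address congruent to 1M modulo 2M.
 * File huge pages can then be mapped there with huge PMDs.
 */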
2222c01d5b30SHugh Dickins 
22231da177e4SLinus Torvalds #ifdef CONFIG_NUMA
222441ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22251da177e4SLinus Torvalds {
2226496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
222741ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22281da177e4SLinus Torvalds }
22291da177e4SLinus Torvalds 
2230d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2231d8dc74f2SAdrian Bunk 					  unsigned long addr)
22321da177e4SLinus Torvalds {
2233496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
223441ffe5d5SHugh Dickins 	pgoff_t index;
22351da177e4SLinus Torvalds 
223641ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
223741ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22381da177e4SLinus Torvalds }
22391da177e4SLinus Torvalds #endif
22401da177e4SLinus Torvalds 
2241d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
22421da177e4SLinus Torvalds {
2243496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22441da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22451da177e4SLinus Torvalds 	int retval = -ENOMEM;
22461da177e4SLinus Torvalds 
2247ea0dfeb4SHugh Dickins 	/*
2248ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2249ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2250ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2251ea0dfeb4SHugh Dickins 	 */
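	/*
	 * Locking charges the full segment size against the caller's
	 * RLIMIT_MEMLOCK ucounts and marks the mapping unevictable so that
	 * page reclaim skips it; unlocking undoes both.
	 */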
22521da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
2253d7c9e99aSAlexey Gladkov 		if (!user_shm_lock(inode->i_size, ucounts))
22541da177e4SLinus Torvalds 			goto out_nomem;
22551da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
225689e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22571da177e4SLinus Torvalds 	}
2258d7c9e99aSAlexey Gladkov 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2259d7c9e99aSAlexey Gladkov 		user_shm_unlock(inode->i_size, ucounts);
22601da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
226189e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22621da177e4SLinus Torvalds 	}
22631da177e4SLinus Torvalds 	retval = 0;
226489e004eaSLee Schermerhorn 
22651da177e4SLinus Torvalds out_nomem:
22661da177e4SLinus Torvalds 	return retval;
22671da177e4SLinus Torvalds }
22681da177e4SLinus Torvalds 
22699b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22701da177e4SLinus Torvalds {
2271ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
227222247efdSPeter Xu 	int ret;
2273ab3948f5SJoel Fernandes (Google) 
227422247efdSPeter Xu 	ret = seal_check_future_write(info->seals, vma);
227522247efdSPeter Xu 	if (ret)
227622247efdSPeter Xu 		return ret;
2277ab3948f5SJoel Fernandes (Google) 
227851b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
227951b0bff2SCatalin Marinas 	vma->vm_flags |= VM_MTE_ALLOWED;
228051b0bff2SCatalin Marinas 
22811da177e4SLinus Torvalds 	file_accessed(file);
22821da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
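	/*
	 * Register with khugepaged only if the VMA can hold at least one
	 * PMD-aligned huge page: rounding vm_start up and vm_end down to
	 * HPAGE_PMD_SIZE must leave a non-empty range.
	 */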
2283396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2284f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2285f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2286f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2287f3f0e1d2SKirill A. Shutemov 	}
22881da177e4SLinus Torvalds 	return 0;
22891da177e4SLinus Torvalds }
22901da177e4SLinus Torvalds 
2291454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
229209208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22931da177e4SLinus Torvalds {
22941da177e4SLinus Torvalds 	struct inode *inode;
22951da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22961da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2297e809d5f0SChris Down 	ino_t ino;
22981da177e4SLinus Torvalds 
2299e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
23001da177e4SLinus Torvalds 		return NULL;
23011da177e4SLinus Torvalds 
23021da177e4SLinus Torvalds 	inode = new_inode(sb);
23031da177e4SLinus Torvalds 	if (inode) {
2304e809d5f0SChris Down 		inode->i_ino = ino;
230521cb47beSChristian Brauner 		inode_init_owner(&init_user_ns, inode, dir, mode);
23061da177e4SLinus Torvalds 		inode->i_blocks = 0;
2307078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
230846c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
23091da177e4SLinus Torvalds 		info = SHMEM_I(inode);
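		/*
		 * Zero the shmem-private part of the inode: the embedded
		 * vfs_inode is the final member of shmem_inode_info, so the
		 * pointer difference below is the size of the private fields
		 * preceding it.
		 */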
23101da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
23111da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2312af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
231340e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23140b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2315779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23161da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
231738f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
231872c04902SAl Viro 		cache_no_acl(inode);
23191da177e4SLinus Torvalds 
23201da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23211da177e4SLinus Torvalds 		default:
232239f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23231da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23241da177e4SLinus Torvalds 			break;
23251da177e4SLinus Torvalds 		case S_IFREG:
232614fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23271da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23281da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
232971fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
233071fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23311da177e4SLinus Torvalds 			break;
23321da177e4SLinus Torvalds 		case S_IFDIR:
2333d8c76e6fSDave Hansen 			inc_nlink(inode);
23341da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23351da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23361da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23371da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23381da177e4SLinus Torvalds 			break;
23391da177e4SLinus Torvalds 		case S_IFLNK:
23401da177e4SLinus Torvalds 			/*
23411da177e4SLinus Torvalds 			 * Must not load anything into the rbtree:
23421da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
23431da177e4SLinus Torvalds 			 */
234471fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23451da177e4SLinus Torvalds 			break;
23461da177e4SLinus Torvalds 		}
2347b45d71fbSJoel Fernandes (Google) 
2348b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23495b04c689SPavel Emelyanov 	} else
23505b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23511da177e4SLinus Torvalds 	return inode;
23521da177e4SLinus Torvalds }
23531da177e4SLinus Torvalds 
23543460f6e5SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
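/*
 * Fill one page of a shmem-backed VMA and map it at dst_addr, on behalf of
 * userfaultfd's UFFDIO_COPY (zeropage == false) and UFFDIO_ZEROPAGE
 * (zeropage == true) paths.
 */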
23553460f6e5SAxel Rasmussen int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23564c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
23574c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
23584c27fe4cSMike Rapoport 			   unsigned long dst_addr,
23594c27fe4cSMike Rapoport 			   unsigned long src_addr,
23608d103963SMike Rapoport 			   bool zeropage,
23614c27fe4cSMike Rapoport 			   struct page **pagep)
23624c27fe4cSMike Rapoport {
23634c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23644c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23654c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23664c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23674c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23684c27fe4cSMike Rapoport 	void *page_kaddr;
23694c27fe4cSMike Rapoport 	struct page *page;
23704c27fe4cSMike Rapoport 	int ret;
23713460f6e5SAxel Rasmussen 	pgoff_t max_off;
23724c27fe4cSMike Rapoport 
23737ed9d238SAxel Rasmussen 	if (!shmem_inode_acct_block(inode, 1)) {
23747ed9d238SAxel Rasmussen 		/*
23757ed9d238SAxel Rasmussen 		 * We may have got a page, returned -ENOENT triggering a retry,
23767ed9d238SAxel Rasmussen 		 * and now we find ourselves with -ENOMEM. Release the page, to
23777ed9d238SAxel Rasmussen 		 * avoid a BUG_ON in our caller.
23787ed9d238SAxel Rasmussen 		 */
23797ed9d238SAxel Rasmussen 		if (unlikely(*pagep)) {
23807ed9d238SAxel Rasmussen 			put_page(*pagep);
23817ed9d238SAxel Rasmussen 			*pagep = NULL;
23827ed9d238SAxel Rasmussen 		}
23837d64ae3aSAxel Rasmussen 		return -ENOMEM;
23847ed9d238SAxel Rasmussen 	}
23854c27fe4cSMike Rapoport 
2386cb658a45SAndrea Arcangeli 	if (!*pagep) {
23877d64ae3aSAxel Rasmussen 		ret = -ENOMEM;
23884c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
23894c27fe4cSMike Rapoport 		if (!page)
23900f079694SMike Rapoport 			goto out_unacct_blocks;
23914c27fe4cSMike Rapoport 
23923460f6e5SAxel Rasmussen 		if (!zeropage) {	/* COPY */
23934c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23948d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23958d103963SMike Rapoport 					     (const void __user *)src_addr,
23964c27fe4cSMike Rapoport 					     PAGE_SIZE);
23974c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23984c27fe4cSMike Rapoport 
2399c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
24004c27fe4cSMike Rapoport 			if (unlikely(ret)) {
24014c27fe4cSMike Rapoport 				*pagep = page;
24027d64ae3aSAxel Rasmussen 				ret = -ENOENT;
24034c27fe4cSMike Rapoport 				/* don't free the page */
24047d64ae3aSAxel Rasmussen 				goto out_unacct_blocks;
24054c27fe4cSMike Rapoport 			}
24063460f6e5SAxel Rasmussen 		} else {		/* ZEROPAGE */
24078d103963SMike Rapoport 			clear_highpage(page);
24088d103963SMike Rapoport 		}
24094c27fe4cSMike Rapoport 	} else {
24104c27fe4cSMike Rapoport 		page = *pagep;
24114c27fe4cSMike Rapoport 		*pagep = NULL;
24124c27fe4cSMike Rapoport 	}
24134c27fe4cSMike Rapoport 
24143460f6e5SAxel Rasmussen 	VM_BUG_ON(PageLocked(page));
24153460f6e5SAxel Rasmussen 	VM_BUG_ON(PageSwapBacked(page));
24169cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
24179cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2418a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
24199cc90c66SAndrea Arcangeli 
2420e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2421e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
24223460f6e5SAxel Rasmussen 	if (unlikely(pgoff >= max_off))
2423e2a50c1fSAndrea Arcangeli 		goto out_release;
2424e2a50c1fSAndrea Arcangeli 
24253fea5a49SJohannes Weiner 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
24263fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24274c27fe4cSMike Rapoport 	if (ret)
24284c27fe4cSMike Rapoport 		goto out_release;
24294c27fe4cSMike Rapoport 
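	/*
	 * Map the now page-cached page at dst_addr; the trailing booleans
	 * are newly_allocated (true) and wp_copy (false).
	 */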
24307d64ae3aSAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
24317d64ae3aSAxel Rasmussen 				       page, true, false);
24327d64ae3aSAxel Rasmussen 	if (ret)
24337d64ae3aSAxel Rasmussen 		goto out_delete_from_cache;
24344c27fe4cSMike Rapoport 
243594b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24364c27fe4cSMike Rapoport 	info->alloced++;
24374c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24384c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
243994b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24404c27fe4cSMike Rapoport 
24417d64ae3aSAxel Rasmussen 	SetPageDirty(page);
2442e2a50c1fSAndrea Arcangeli 	unlock_page(page);
24437d64ae3aSAxel Rasmussen 	return 0;
24447d64ae3aSAxel Rasmussen out_delete_from_cache:
2445e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
24464c27fe4cSMike Rapoport out_release:
24479cc90c66SAndrea Arcangeli 	unlock_page(page);
24484c27fe4cSMike Rapoport 	put_page(page);
24494c27fe4cSMike Rapoport out_unacct_blocks:
24500f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24517d64ae3aSAxel Rasmussen 	return ret;
24524c27fe4cSMike Rapoport }
24533460f6e5SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
24548d103963SMike Rapoport 
24551da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
245692e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
245769f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24581da177e4SLinus Torvalds 
24596d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
24606d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
24616d9d88d0SJarkko Sakkinen #else
24626d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
24636d9d88d0SJarkko Sakkinen #endif
24646d9d88d0SJarkko Sakkinen 
24651da177e4SLinus Torvalds static int
2466800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2467800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2468800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24691da177e4SLinus Torvalds {
2470800d15a5SNick Piggin 	struct inode *inode = mapping->host;
247140e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
247209cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
247340e041a2SDavid Herrmann 
247440e041a2SDavid Herrmann 	/* i_mutex is held by caller */
2475ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2476ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2477ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
247840e041a2SDavid Herrmann 			return -EPERM;
247940e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
248040e041a2SDavid Herrmann 			return -EPERM;
248140e041a2SDavid Herrmann 	}
248240e041a2SDavid Herrmann 
24839e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2484800d15a5SNick Piggin }
2485800d15a5SNick Piggin 
2486800d15a5SNick Piggin static int
2487800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2488800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2489800d15a5SNick Piggin 			struct page *page, void *fsdata)
2490800d15a5SNick Piggin {
2491800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2492800d15a5SNick Piggin 
2493800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2494800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2495800d15a5SNick Piggin 
2496ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2497800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
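		/*
		 * SetPageUptodate() below marks the whole compound page
		 * uptodate, so zero and flush every other subpage first lest
		 * stale data be exposed through them.
		 */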
2498800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2499800d8c63SKirill A. Shutemov 			int i;
2500800d8c63SKirill A. Shutemov 
2501800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2502800d8c63SKirill A. Shutemov 				if (head + i == page)
2503800d8c63SKirill A. Shutemov 					continue;
2504800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2505800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2506800d8c63SKirill A. Shutemov 			}
2507800d8c63SKirill A. Shutemov 		}
250809cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
250909cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2510ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
251109cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2512ec9516fbSHugh Dickins 		}
2513800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2514ec9516fbSHugh Dickins 	}
2515d3602444SHugh Dickins 	set_page_dirty(page);
25166746aff7SWu Fengguang 	unlock_page(page);
251709cbfeafSKirill A. Shutemov 	put_page(page);
2518d3602444SHugh Dickins 
2519800d15a5SNick Piggin 	return copied;
25201da177e4SLinus Torvalds }
25211da177e4SLinus Torvalds 
25222ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25231da177e4SLinus Torvalds {
25246e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25256e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25261da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
252741ffe5d5SHugh Dickins 	pgoff_t index;
252841ffe5d5SHugh Dickins 	unsigned long offset;
2529a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2530f7c1d074SGeert Uytterhoeven 	int error = 0;
2531cb66a7a1SAl Viro 	ssize_t retval = 0;
25326e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2533a0ee5ec5SHugh Dickins 
2534a0ee5ec5SHugh Dickins 	/*
2535a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2536a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2537a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2538a0ee5ec5SHugh Dickins 	 */
2539777eda2cSAl Viro 	if (!iter_is_iovec(to))
254075edd345SHugh Dickins 		sgp = SGP_CACHE;
25411da177e4SLinus Torvalds 
254209cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
254309cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25441da177e4SLinus Torvalds 
25451da177e4SLinus Torvalds 	for (;;) {
25461da177e4SLinus Torvalds 		struct page *page = NULL;
254741ffe5d5SHugh Dickins 		pgoff_t end_index;
254841ffe5d5SHugh Dickins 		unsigned long nr, ret;
25491da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25501da177e4SLinus Torvalds 
255109cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25521da177e4SLinus Torvalds 		if (index > end_index)
25531da177e4SLinus Torvalds 			break;
25541da177e4SLinus Torvalds 		if (index == end_index) {
255509cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25561da177e4SLinus Torvalds 			if (nr <= offset)
25571da177e4SLinus Torvalds 				break;
25581da177e4SLinus Torvalds 		}
25591da177e4SLinus Torvalds 
25609e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
25616e58e79dSAl Viro 		if (error) {
25626e58e79dSAl Viro 			if (error == -EINVAL)
25636e58e79dSAl Viro 				error = 0;
25641da177e4SLinus Torvalds 			break;
25651da177e4SLinus Torvalds 		}
256675edd345SHugh Dickins 		if (page) {
256775edd345SHugh Dickins 			if (sgp == SGP_CACHE)
256875edd345SHugh Dickins 				set_page_dirty(page);
2569d3602444SHugh Dickins 			unlock_page(page);
257075edd345SHugh Dickins 		}
25711da177e4SLinus Torvalds 
25721da177e4SLinus Torvalds 		/*
25731da177e4SLinus Torvalds 		 * We must evaluate i_size afterwards, since reads (unlike writes)
25741b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate.
25751da177e4SLinus Torvalds 		 */
257609cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
25771da177e4SLinus Torvalds 		i_size = i_size_read(inode);
257809cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25791da177e4SLinus Torvalds 		if (index == end_index) {
258009cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25811da177e4SLinus Torvalds 			if (nr <= offset) {
25821da177e4SLinus Torvalds 				if (page)
258309cbfeafSKirill A. Shutemov 					put_page(page);
25841da177e4SLinus Torvalds 				break;
25851da177e4SLinus Torvalds 			}
25861da177e4SLinus Torvalds 		}
25871da177e4SLinus Torvalds 		nr -= offset;
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 		if (page) {
25901da177e4SLinus Torvalds 			/*
25911da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
25921da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
25931da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
25941da177e4SLinus Torvalds 			 */
25951da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
25961da177e4SLinus Torvalds 				flush_dcache_page(page);
25971da177e4SLinus Torvalds 			/*
25981da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
25991da177e4SLinus Torvalds 			 */
26001da177e4SLinus Torvalds 			if (!offset)
26011da177e4SLinus Torvalds 				mark_page_accessed(page);
2602b5810039SNick Piggin 		} else {
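			/* This index is a hole: copy from the shared zero page. */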
26031da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
260409cbfeafSKirill A. Shutemov 			get_page(page);
2605b5810039SNick Piggin 		}
26061da177e4SLinus Torvalds 
26071da177e4SLinus Torvalds 		/*
26081da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
26091da177e4SLinus Torvalds 		 * now we can copy it to user space...
26101da177e4SLinus Torvalds 		 */
26112ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
26126e58e79dSAl Viro 		retval += ret;
26131da177e4SLinus Torvalds 		offset += ret;
261409cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
261509cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26161da177e4SLinus Torvalds 
261709cbfeafSKirill A. Shutemov 		put_page(page);
26182ba5bbedSAl Viro 		if (!iov_iter_count(to))
26191da177e4SLinus Torvalds 			break;
26206e58e79dSAl Viro 		if (ret < nr) {
26216e58e79dSAl Viro 			error = -EFAULT;
26226e58e79dSAl Viro 			break;
26236e58e79dSAl Viro 		}
26241da177e4SLinus Torvalds 		cond_resched();
26251da177e4SLinus Torvalds 	}
26261da177e4SLinus Torvalds 
262709cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26286e58e79dSAl Viro 	file_accessed(file);
26296e58e79dSAl Viro 	return retval ? retval : error;
26301da177e4SLinus Torvalds }
26311da177e4SLinus Torvalds 
2632965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2633220f2ac9SHugh Dickins {
2634220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2635220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2636220f2ac9SHugh Dickins 
2637965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2638965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2639220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
264041139aa4SMatthew Wilcox (Oracle) 	if (offset < 0)
264141139aa4SMatthew Wilcox (Oracle) 		return -ENXIO;
264241139aa4SMatthew Wilcox (Oracle) 
26435955102cSAl Viro 	inode_lock(inode);
2644220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
264541139aa4SMatthew Wilcox (Oracle) 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2646387aae6fSHugh Dickins 	if (offset >= 0)
264746a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26485955102cSAl Viro 	inode_unlock(inode);
2649220f2ac9SHugh Dickins 	return offset;
2650220f2ac9SHugh Dickins }
2651220f2ac9SHugh Dickins 
265283e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
265383e4fa9cSHugh Dickins 							 loff_t len)
265483e4fa9cSHugh Dickins {
2655496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2656e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
265740e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26581aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2659e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2660e2d12e22SHugh Dickins 	int error;
266183e4fa9cSHugh Dickins 
266213ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
266313ace4d0SHugh Dickins 		return -EOPNOTSUPP;
266413ace4d0SHugh Dickins 
26655955102cSAl Viro 	inode_lock(inode);
266683e4fa9cSHugh Dickins 
266783e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
266883e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
266983e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
267083e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
26718e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
267283e4fa9cSHugh Dickins 
267340e041a2SDavid Herrmann 		/* protected by i_mutex */
2674ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
267540e041a2SDavid Herrmann 			error = -EPERM;
267640e041a2SDavid Herrmann 			goto out;
267740e041a2SDavid Herrmann 		}
267840e041a2SDavid Herrmann 
26798e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2680aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2681f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2682f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2683f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2684f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2685f00cdc6dSHugh Dickins 
268683e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
268783e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
268883e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
268983e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
269083e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
26918e205f77SHugh Dickins 
26928e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
26938e205f77SHugh Dickins 		inode->i_private = NULL;
26948e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
26952055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
26968e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
269783e4fa9cSHugh Dickins 		error = 0;
26988e205f77SHugh Dickins 		goto out;
269983e4fa9cSHugh Dickins 	}
270083e4fa9cSHugh Dickins 
2701e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2702e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2703e2d12e22SHugh Dickins 	if (error)
2704e2d12e22SHugh Dickins 		goto out;
2705e2d12e22SHugh Dickins 
270640e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
270740e041a2SDavid Herrmann 		error = -EPERM;
270840e041a2SDavid Herrmann 		goto out;
270940e041a2SDavid Herrmann 	}
271040e041a2SDavid Herrmann 
271109cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
271209cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2713e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2714e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2715e2d12e22SHugh Dickins 		error = -ENOSPC;
2716e2d12e22SHugh Dickins 		goto out;
2717e2d12e22SHugh Dickins 	}
2718e2d12e22SHugh Dickins 
27198e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27201aac1400SHugh Dickins 	shmem_falloc.start = start;
27211aac1400SHugh Dickins 	shmem_falloc.next  = start;
27221aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27231aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27241aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27251aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27261aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27271aac1400SHugh Dickins 
2728e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2729e2d12e22SHugh Dickins 		struct page *page;
2730e2d12e22SHugh Dickins 
2731e2d12e22SHugh Dickins 		/*
2732e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2733e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2734e2d12e22SHugh Dickins 		 */
2735e2d12e22SHugh Dickins 		if (signal_pending(current))
2736e2d12e22SHugh Dickins 			error = -EINTR;
27371aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27381aac1400SHugh Dickins 			error = -ENOMEM;
2739e2d12e22SHugh Dickins 		else
27409e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2741e2d12e22SHugh Dickins 		if (error) {
27421635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
27437f556567SHugh Dickins 			if (index > start) {
27441635f6a7SHugh Dickins 				shmem_undo_range(inode,
274509cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2746b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27477f556567SHugh Dickins 			}
27481aac1400SHugh Dickins 			goto undone;
2749e2d12e22SHugh Dickins 		}
2750e2d12e22SHugh Dickins 
2751e2d12e22SHugh Dickins 		/*
27521aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
27531aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
27541aac1400SHugh Dickins 		 */
27551aac1400SHugh Dickins 		shmem_falloc.next++;
27561aac1400SHugh Dickins 		if (!PageUptodate(page))
27571aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
27581aac1400SHugh Dickins 
27591aac1400SHugh Dickins 		/*
27601635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
27611635f6a7SHugh Dickins 		 * can be recognized if we need to roll back on error later.
27621635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2763e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2764e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2765e2d12e22SHugh Dickins 		 */
2766e2d12e22SHugh Dickins 		set_page_dirty(page);
2767e2d12e22SHugh Dickins 		unlock_page(page);
276809cbfeafSKirill A. Shutemov 		put_page(page);
2769e2d12e22SHugh Dickins 		cond_resched();
2770e2d12e22SHugh Dickins 	}
2771e2d12e22SHugh Dickins 
2772e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2773e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2774078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
27751aac1400SHugh Dickins undone:
27761aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27771aac1400SHugh Dickins 	inode->i_private = NULL;
27781aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2779e2d12e22SHugh Dickins out:
27805955102cSAl Viro 	inode_unlock(inode);
278183e4fa9cSHugh Dickins 	return error;
278283e4fa9cSHugh Dickins }
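/*
 * Userspace example (illustrative): punch a 1 MiB hole at offset 4 MiB in a
 * tmpfs file; fallocate(2) requires FALLOC_FL_PUNCH_HOLE to be combined with
 * FALLOC_FL_KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4 << 20, 1 << 20);
 */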
278383e4fa9cSHugh Dickins 
2784726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
27851da177e4SLinus Torvalds {
2786726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
27871da177e4SLinus Torvalds 
27881da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
278909cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
27901da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
27910edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
27921da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
279341ffe5d5SHugh Dickins 		buf->f_bavail =
279441ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
279541ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
27960edd73b3SHugh Dickins 	}
27970edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
27981da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
27991da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28001da177e4SLinus Torvalds 	}
28011da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
280259cda49eSAmir Goldstein 
280359cda49eSAmir Goldstein 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
280459cda49eSAmir Goldstein 
28051da177e4SLinus Torvalds 	return 0;
28061da177e4SLinus Torvalds }
28071da177e4SLinus Torvalds 
28081da177e4SLinus Torvalds /*
28091da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
28101da177e4SLinus Torvalds  */
28111da177e4SLinus Torvalds static int
2812549c7297SChristian Brauner shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2813549c7297SChristian Brauner 	    struct dentry *dentry, umode_t mode, dev_t dev)
28141da177e4SLinus Torvalds {
28150b0a0806SHugh Dickins 	struct inode *inode;
28161da177e4SLinus Torvalds 	int error = -ENOSPC;
28171da177e4SLinus Torvalds 
2818454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
28191da177e4SLinus Torvalds 	if (inode) {
2820feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2821feda821eSChristoph Hellwig 		if (error)
2822feda821eSChristoph Hellwig 			goto out_iput;
28232a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28249d8f13baSMimi Zohar 						     &dentry->d_name,
28256d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2826feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2827feda821eSChristoph Hellwig 			goto out_iput;
282837ec43cdSMimi Zohar 
2829718deb6bSAl Viro 		error = 0;
28301da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2831078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28321da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28331da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28341da177e4SLinus Torvalds 	}
28351da177e4SLinus Torvalds 	return error;
2836feda821eSChristoph Hellwig out_iput:
2837feda821eSChristoph Hellwig 	iput(inode);
2838feda821eSChristoph Hellwig 	return error;
28391da177e4SLinus Torvalds }
28401da177e4SLinus Torvalds 
284160545d0dSAl Viro static int
2842549c7297SChristian Brauner shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2843549c7297SChristian Brauner 	      struct dentry *dentry, umode_t mode)
284460545d0dSAl Viro {
284560545d0dSAl Viro 	struct inode *inode;
284660545d0dSAl Viro 	int error = -ENOSPC;
284760545d0dSAl Viro 
284860545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
284960545d0dSAl Viro 	if (inode) {
285060545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
285160545d0dSAl Viro 						     NULL,
285260545d0dSAl Viro 						     shmem_initxattrs, NULL);
2853feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2854feda821eSChristoph Hellwig 			goto out_iput;
2855feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2856feda821eSChristoph Hellwig 		if (error)
2857feda821eSChristoph Hellwig 			goto out_iput;
285860545d0dSAl Viro 		d_tmpfile(dentry, inode);
285960545d0dSAl Viro 	}
286060545d0dSAl Viro 	return error;
2861feda821eSChristoph Hellwig out_iput:
2862feda821eSChristoph Hellwig 	iput(inode);
2863feda821eSChristoph Hellwig 	return error;
286460545d0dSAl Viro }
286560545d0dSAl Viro 
2866549c7297SChristian Brauner static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2867549c7297SChristian Brauner 		       struct dentry *dentry, umode_t mode)
28681da177e4SLinus Torvalds {
28691da177e4SLinus Torvalds 	int error;
28701da177e4SLinus Torvalds 
2871549c7297SChristian Brauner 	if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2872549c7297SChristian Brauner 				 mode | S_IFDIR, 0)))
28731da177e4SLinus Torvalds 		return error;
2874d8c76e6fSDave Hansen 	inc_nlink(dir);
28751da177e4SLinus Torvalds 	return 0;
28761da177e4SLinus Torvalds }
28771da177e4SLinus Torvalds 
2878549c7297SChristian Brauner static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2879549c7297SChristian Brauner 			struct dentry *dentry, umode_t mode, bool excl)
28801da177e4SLinus Torvalds {
2881549c7297SChristian Brauner 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
28821da177e4SLinus Torvalds }
28831da177e4SLinus Torvalds 
28841da177e4SLinus Torvalds /*
28851da177e4SLinus Torvalds  * Link a file.
28861da177e4SLinus Torvalds  */
28871da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
28881da177e4SLinus Torvalds {
288975c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
289029b00e60SDarrick J. Wong 	int ret = 0;
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds 	/*
28931da177e4SLinus Torvalds 	 * No ordinary (disk-based) filesystem counts links as inodes;
28941da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
28951da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
28961062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
28971062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
28981da177e4SLinus Torvalds 	 */
28991062af92SDarrick J. Wong 	if (inode->i_nlink) {
2900e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
29015b04c689SPavel Emelyanov 		if (ret)
29025b04c689SPavel Emelyanov 			goto out;
29031062af92SDarrick J. Wong 	}
29041da177e4SLinus Torvalds 
29051da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2906078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2907d8c76e6fSDave Hansen 	inc_nlink(inode);
29087de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29091da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29101da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29115b04c689SPavel Emelyanov out:
29125b04c689SPavel Emelyanov 	return ret;
29131da177e4SLinus Torvalds }
29141da177e4SLinus Torvalds 
29151da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
29161da177e4SLinus Torvalds {
291775c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
29181da177e4SLinus Torvalds 
29195b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
29205b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
29211da177e4SLinus Torvalds 
29221da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2923078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
29249a53c3a7SDave Hansen 	drop_nlink(inode);
29251da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
29261da177e4SLinus Torvalds 	return 0;
29271da177e4SLinus Torvalds }
29281da177e4SLinus Torvalds 
29291da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
29301da177e4SLinus Torvalds {
29311da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29321da177e4SLinus Torvalds 		return -ENOTEMPTY;
29331da177e4SLinus Torvalds 
293475c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29359a53c3a7SDave Hansen 	drop_nlink(dir);
29361da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29371da177e4SLinus Torvalds }
29381da177e4SLinus Torvalds 
293937456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
294037456771SMiklos Szeredi {
2941e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
2942e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
294337456771SMiklos Szeredi 
294437456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
294537456771SMiklos Szeredi 		if (old_is_dir) {
294637456771SMiklos Szeredi 			drop_nlink(old_dir);
294737456771SMiklos Szeredi 			inc_nlink(new_dir);
294837456771SMiklos Szeredi 		} else {
294937456771SMiklos Szeredi 			drop_nlink(new_dir);
295037456771SMiklos Szeredi 			inc_nlink(old_dir);
295137456771SMiklos Szeredi 		}
295237456771SMiklos Szeredi 	}
295337456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
295437456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
295575c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
2956078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
295737456771SMiklos Szeredi 
295837456771SMiklos Szeredi 	return 0;
295937456771SMiklos Szeredi }
296037456771SMiklos Szeredi 
2961549c7297SChristian Brauner static int shmem_whiteout(struct user_namespace *mnt_userns,
2962549c7297SChristian Brauner 			  struct inode *old_dir, struct dentry *old_dentry)
296346fdb794SMiklos Szeredi {
296446fdb794SMiklos Szeredi 	struct dentry *whiteout;
296546fdb794SMiklos Szeredi 	int error;
296646fdb794SMiklos Szeredi 
296746fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
296846fdb794SMiklos Szeredi 	if (!whiteout)
296946fdb794SMiklos Szeredi 		return -ENOMEM;
297046fdb794SMiklos Szeredi 
2971549c7297SChristian Brauner 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
297246fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
297346fdb794SMiklos Szeredi 	dput(whiteout);
297446fdb794SMiklos Szeredi 	if (error)
297546fdb794SMiklos Szeredi 		return error;
297646fdb794SMiklos Szeredi 
297746fdb794SMiklos Szeredi 	/*
297846fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
297946fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
298046fdb794SMiklos Szeredi 	 *
298146fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
298246fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
298346fdb794SMiklos Szeredi 	 */
298446fdb794SMiklos Szeredi 	d_rehash(whiteout);
298546fdb794SMiklos Szeredi 	return 0;
298646fdb794SMiklos Szeredi }
298746fdb794SMiklos Szeredi 
29881da177e4SLinus Torvalds /*
29891da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename;
29901da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
29911da177e4SLinus Torvalds  * it exists, so that the VFS layer correctly frees it when it
29921da177e4SLinus Torvalds  * gets overwritten.
29931da177e4SLinus Torvalds  */
2994549c7297SChristian Brauner static int shmem_rename2(struct user_namespace *mnt_userns,
2995549c7297SChristian Brauner 			 struct inode *old_dir, struct dentry *old_dentry,
2996549c7297SChristian Brauner 			 struct inode *new_dir, struct dentry *new_dentry,
2997549c7297SChristian Brauner 			 unsigned int flags)
29981da177e4SLinus Torvalds {
299975c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30001da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30011da177e4SLinus Torvalds 
300246fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30033b69ff51SMiklos Szeredi 		return -EINVAL;
30043b69ff51SMiklos Szeredi 
300537456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
300637456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
300737456771SMiklos Szeredi 
30081da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
30091da177e4SLinus Torvalds 		return -ENOTEMPTY;
30101da177e4SLinus Torvalds 
301146fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
301246fdb794SMiklos Szeredi 		int error;
301346fdb794SMiklos Szeredi 
3014549c7297SChristian Brauner 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
301546fdb794SMiklos Szeredi 		if (error)
301646fdb794SMiklos Szeredi 			return error;
301746fdb794SMiklos Szeredi 	}
301846fdb794SMiklos Szeredi 
301975c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
30201da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3021b928095bSMiklos Szeredi 		if (they_are_dirs) {
302275c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
30239a53c3a7SDave Hansen 			drop_nlink(old_dir);
3024b928095bSMiklos Szeredi 		}
30251da177e4SLinus Torvalds 	} else if (they_are_dirs) {
30269a53c3a7SDave Hansen 		drop_nlink(old_dir);
3027d8c76e6fSDave Hansen 		inc_nlink(new_dir);
30281da177e4SLinus Torvalds 	}
30291da177e4SLinus Torvalds 
30301da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
30311da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
30321da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
30331da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3034078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30351da177e4SLinus Torvalds 	return 0;
30361da177e4SLinus Torvalds }
30371da177e4SLinus Torvalds 
3038549c7297SChristian Brauner static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3039549c7297SChristian Brauner 			 struct dentry *dentry, const char *symname)
30401da177e4SLinus Torvalds {
30411da177e4SLinus Torvalds 	int error;
30421da177e4SLinus Torvalds 	int len;
30431da177e4SLinus Torvalds 	struct inode *inode;
30449276aad6SHugh Dickins 	struct page *page;
30451da177e4SLinus Torvalds 
30461da177e4SLinus Torvalds 	len = strlen(symname) + 1;
304709cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30481da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30491da177e4SLinus Torvalds 
30500825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30510825a6f9SJoe Perches 				VM_NORESERVE);
30521da177e4SLinus Torvalds 	if (!inode)
30531da177e4SLinus Torvalds 		return -ENOSPC;
30541da177e4SLinus Torvalds 
30559d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30566d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3057343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3058570bc1c2SStephen Smalley 		iput(inode);
3059570bc1c2SStephen Smalley 		return error;
3060570bc1c2SStephen Smalley 	}
3061570bc1c2SStephen Smalley 
30621da177e4SLinus Torvalds 	inode->i_size = len-1;
306369f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
30643ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
30653ed47db3SAl Viro 		if (!inode->i_link) {
306669f07ec9SHugh Dickins 			iput(inode);
306769f07ec9SHugh Dickins 			return -ENOMEM;
306869f07ec9SHugh Dickins 		}
306969f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
30701da177e4SLinus Torvalds 	} else {
3071e8ecde25SAl Viro 		inode_nohighmem(inode);
30729e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
30731da177e4SLinus Torvalds 		if (error) {
30741da177e4SLinus Torvalds 			iput(inode);
30751da177e4SLinus Torvalds 			return error;
30761da177e4SLinus Torvalds 		}
307714fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
30781da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
307921fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3080ec9516fbSHugh Dickins 		SetPageUptodate(page);
30811da177e4SLinus Torvalds 		set_page_dirty(page);
30826746aff7SWu Fengguang 		unlock_page(page);
308309cbfeafSKirill A. Shutemov 		put_page(page);
30841da177e4SLinus Torvalds 	}
30851da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3086078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
30871da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30881da177e4SLinus Torvalds 	dget(dentry);
30891da177e4SLinus Torvalds 	return 0;
30901da177e4SLinus Torvalds }
30911da177e4SLinus Torvalds 
3092fceef393SAl Viro static void shmem_put_link(void *arg)
3093fceef393SAl Viro {
3094fceef393SAl Viro 	mark_page_accessed(arg);
3095fceef393SAl Viro 	put_page(arg);
3096fceef393SAl Viro }
3097fceef393SAl Viro 
30986b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3099fceef393SAl Viro 				  struct inode *inode,
3100fceef393SAl Viro 				  struct delayed_call *done)
31011da177e4SLinus Torvalds {
31021da177e4SLinus Torvalds 	struct page *page = NULL;
31036b255391SAl Viro 	int error;
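	/*
	 * A NULL dentry means RCU-walk mode: we may not sleep, so succeed
	 * only if the symlink page is already resident and uptodate, and
	 * otherwise return -ECHILD so the VFS retries in ref-walk mode.
	 */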
31046a6c9904SAl Viro 	if (!dentry) {
31056a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
31066a6c9904SAl Viro 		if (!page)
31076b255391SAl Viro 			return ERR_PTR(-ECHILD);
31086a6c9904SAl Viro 		if (!PageUptodate(page)) {
31096a6c9904SAl Viro 			put_page(page);
31106a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31116a6c9904SAl Viro 		}
31126a6c9904SAl Viro 	} else {
31139e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3114680baacbSAl Viro 		if (error)
3115680baacbSAl Viro 			return ERR_PTR(error);
3116d3602444SHugh Dickins 		unlock_page(page);
31171da177e4SLinus Torvalds 	}
3118fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
311921fc61c7SAl Viro 	return page_address(page);
31201da177e4SLinus Torvalds }
31211da177e4SLinus Torvalds 
3122b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3123b09e0fa4SEric Paris /*
3124b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3125b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3126b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3127b09e0fa4SEric Paris  * filesystem level, though.
3128b09e0fa4SEric Paris  */
3129b09e0fa4SEric Paris 
31306d9d88d0SJarkko Sakkinen /*
31316d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
31326d9d88d0SJarkko Sakkinen  */
31336d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
31346d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
31356d9d88d0SJarkko Sakkinen 			    void *fs_info)
31366d9d88d0SJarkko Sakkinen {
31376d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
31386d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
313938f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
31406d9d88d0SJarkko Sakkinen 	size_t len;
31416d9d88d0SJarkko Sakkinen 
31426d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
314338f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
31446d9d88d0SJarkko Sakkinen 		if (!new_xattr)
31456d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31466d9d88d0SJarkko Sakkinen 
31476d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
31486d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
31496d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
31506d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
31513bef735aSChengguang Xu 			kvfree(new_xattr);
31526d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31536d9d88d0SJarkko Sakkinen 		}
31546d9d88d0SJarkko Sakkinen 
31556d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
31566d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
31576d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
31586d9d88d0SJarkko Sakkinen 		       xattr->name, len);
31596d9d88d0SJarkko Sakkinen 
316038f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
31616d9d88d0SJarkko Sakkinen 	}
31626d9d88d0SJarkko Sakkinen 
31636d9d88d0SJarkko Sakkinen 	return 0;
31646d9d88d0SJarkko Sakkinen }
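
/*
 * Illustrative note, not part of this file: shmem_initxattrs() is handed
 * to the LSM at inode-creation time, which calls it back with the
 * security xattrs to attach.  Roughly, from the mknod/create path:
 *
 *	error = security_inode_init_security(inode, dir, &dentry->d_name,
 *					     shmem_initxattrs, NULL);
 *	if (error && error != -EOPNOTSUPP)
 *		goto out_iput;
 */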
31656d9d88d0SJarkko Sakkinen 
3166aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3167b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3168b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3169aa7c5241SAndreas Gruenbacher {
3170b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3171aa7c5241SAndreas Gruenbacher 
3172aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3173aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3174aa7c5241SAndreas Gruenbacher }
3175aa7c5241SAndreas Gruenbacher 
3176aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3177e65ce2a5SChristian Brauner 				   struct user_namespace *mnt_userns,
317859301226SAl Viro 				   struct dentry *unused, struct inode *inode,
317959301226SAl Viro 				   const char *name, const void *value,
318059301226SAl Viro 				   size_t size, int flags)
3181aa7c5241SAndreas Gruenbacher {
318259301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3183aa7c5241SAndreas Gruenbacher 
3184aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3185a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3186aa7c5241SAndreas Gruenbacher }
3187aa7c5241SAndreas Gruenbacher 
3188aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3189aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3190aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3191aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3192aa7c5241SAndreas Gruenbacher };
3193aa7c5241SAndreas Gruenbacher 
3194aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3195aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3196aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3197aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3198aa7c5241SAndreas Gruenbacher };
3199aa7c5241SAndreas Gruenbacher 
3200b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3201b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3202feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3203feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3204b09e0fa4SEric Paris #endif
3205aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3206aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3207b09e0fa4SEric Paris 	NULL
3208b09e0fa4SEric Paris };
3209b09e0fa4SEric Paris 
3210b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3211b09e0fa4SEric Paris {
321275c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3213786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3214b09e0fa4SEric Paris }
3215b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3216b09e0fa4SEric Paris 
321769f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
32186b255391SAl Viro 	.get_link	= simple_get_link,
3219b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3220b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3221b09e0fa4SEric Paris #endif
32221da177e4SLinus Torvalds };
32231da177e4SLinus Torvalds 
322492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
32256b255391SAl Viro 	.get_link	= shmem_get_link,
3226b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3227b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
322839f0247dSAndreas Gruenbacher #endif
3229b09e0fa4SEric Paris };
323039f0247dSAndreas Gruenbacher 
323191828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
323291828a40SDavid M. Grimes {
323391828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
323491828a40SDavid M. Grimes }
323591828a40SDavid M. Grimes 
323691828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
323791828a40SDavid M. Grimes {
323891828a40SDavid M. Grimes 	__u32 *fh = vfh;
323991828a40SDavid M. Grimes 	__u64 inum = fh[2];
324091828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
324191828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
324291828a40SDavid M. Grimes }
324391828a40SDavid M. Grimes 
324412ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
324512ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
324612ba780dSAmir Goldstein {
324712ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
324812ba780dSAmir Goldstein 
324912ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
325012ba780dSAmir Goldstein }
325112ba780dSAmir Goldstein 
325212ba780dSAmir Goldstein 
3253480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3254480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
325591828a40SDavid M. Grimes {
325691828a40SDavid M. Grimes 	struct inode *inode;
3257480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
325835c2a7f4SHugh Dickins 	u64 inum;
325991828a40SDavid M. Grimes 
3260480b116cSChristoph Hellwig 	if (fh_len < 3)
3261480b116cSChristoph Hellwig 		return NULL;
3262480b116cSChristoph Hellwig 
326335c2a7f4SHugh Dickins 	inum = fid->raw[2];
326435c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
326535c2a7f4SHugh Dickins 
3266480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3267480b116cSChristoph Hellwig 			shmem_match, fid->raw);
326891828a40SDavid M. Grimes 	if (inode) {
326912ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
327091828a40SDavid M. Grimes 		iput(inode);
327191828a40SDavid M. Grimes 	}
327291828a40SDavid M. Grimes 
3273480b116cSChristoph Hellwig 	return dentry;
327491828a40SDavid M. Grimes }
327591828a40SDavid M. Grimes 
3276b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3277b0b0382bSAl Viro 				struct inode *parent)
327891828a40SDavid M. Grimes {
32795fe0c237SAneesh Kumar K.V 	if (*len < 3) {
32805fe0c237SAneesh Kumar K.V 		*len = 3;
328194e07a75SNamjae Jeon 		return FILEID_INVALID;
32825fe0c237SAneesh Kumar K.V 	}
328391828a40SDavid M. Grimes 
32841d3382cbSAl Viro 	if (inode_unhashed(inode)) {
328591828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
328691828a40SDavid M. Grimes 		 * and since we hash inodes here rather than at
328791828a40SDavid M. Grimes 		 * creation time, we need a lock to ensure that we
328891828a40SDavid M. Grimes 		 * only try to do it once.
328991828a40SDavid M. Grimes 		 */
329091828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
329191828a40SDavid M. Grimes 		spin_lock(&lock);
32921d3382cbSAl Viro 		if (inode_unhashed(inode))
329391828a40SDavid M. Grimes 			__insert_inode_hash(inode,
329491828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
329591828a40SDavid M. Grimes 		spin_unlock(&lock);
329691828a40SDavid M. Grimes 	}
329791828a40SDavid M. Grimes 
329891828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
329991828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
330091828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
330191828a40SDavid M. Grimes 
330291828a40SDavid M. Grimes 	*len = 3;
330391828a40SDavid M. Grimes 	return 1;
330491828a40SDavid M. Grimes }
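
/*
 * Illustrative worked example, not part of this file: for an inode with
 * i_ino = 0x123456789 and i_generation = 7, the handle built above is
 *
 *	fh[0] = 7		(i_generation)
 *	fh[1] = 0x23456789	(i_ino, low 32 bits)
 *	fh[2] = 0x1		(i_ino, high 32 bits)
 *
 * shmem_fh_to_dentry() rebuilds inum = (fh[2] << 32) | fh[1] and looks
 * the inode up under the same hash key, i_ino + i_generation, that
 * __insert_inode_hash() used here.
 */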
330591828a40SDavid M. Grimes 
330639655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
330791828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
330891828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3309480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
331091828a40SDavid M. Grimes };
331191828a40SDavid M. Grimes 
3312626c3920SAl Viro enum shmem_param {
3313626c3920SAl Viro 	Opt_gid,
3314626c3920SAl Viro 	Opt_huge,
3315626c3920SAl Viro 	Opt_mode,
3316626c3920SAl Viro 	Opt_mpol,
3317626c3920SAl Viro 	Opt_nr_blocks,
3318626c3920SAl Viro 	Opt_nr_inodes,
3319626c3920SAl Viro 	Opt_size,
3320626c3920SAl Viro 	Opt_uid,
3321ea3271f7SChris Down 	Opt_inode32,
3322ea3271f7SChris Down 	Opt_inode64,
3323626c3920SAl Viro };
33241da177e4SLinus Torvalds 
33255eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
33262710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
33272710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
33282710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
33292710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
33302710c957SAl Viro 	{}
33312710c957SAl Viro };
33322710c957SAl Viro 
3333d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3334626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
33352710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3336626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3337626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3338626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3339626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3340626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3341626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3342ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3343ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
3344626c3920SAl Viro 	{}
3345626c3920SAl Viro };
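
/*
 * Illustrative sketch (userspace, not part of this file): exercising the
 * parameter table above through mount(2).  The target path is made up.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "size" takes k/m/g suffixes or a trailing '%' of total RAM;
	 * "mode" is octal; "huge" is one of the enum strings above. */
	if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
		  "size=50%,nr_inodes=1m,mode=1777,huge=within_size,inode64"))
		perror("mount");
	return 0;
}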
3346626c3920SAl Viro 
3347f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3348626c3920SAl Viro {
3349f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3350626c3920SAl Viro 	struct fs_parse_result result;
3351e04dc423SAl Viro 	unsigned long long size;
3352626c3920SAl Viro 	char *rest;
3353626c3920SAl Viro 	int opt;
3354626c3920SAl Viro 
3355d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3356f3235626SDavid Howells 	if (opt < 0)
3357626c3920SAl Viro 		return opt;
3358626c3920SAl Viro 
3359626c3920SAl Viro 	switch (opt) {
3360626c3920SAl Viro 	case Opt_size:
3361626c3920SAl Viro 		size = memparse(param->string, &rest);
3362e04dc423SAl Viro 		if (*rest == '%') {
3363e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3364e04dc423SAl Viro 			size *= totalram_pages();
3365e04dc423SAl Viro 			do_div(size, 100);
3366e04dc423SAl Viro 			rest++;
3367e04dc423SAl Viro 		}
3368e04dc423SAl Viro 		if (*rest)
3369626c3920SAl Viro 			goto bad_value;
3370e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3371e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3372626c3920SAl Viro 		break;
3373626c3920SAl Viro 	case Opt_nr_blocks:
3374626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
3375e04dc423SAl Viro 		if (*rest)
3376626c3920SAl Viro 			goto bad_value;
3377e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3378626c3920SAl Viro 		break;
3379626c3920SAl Viro 	case Opt_nr_inodes:
3380626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3381e04dc423SAl Viro 		if (*rest)
3382626c3920SAl Viro 			goto bad_value;
3383e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3384626c3920SAl Viro 		break;
3385626c3920SAl Viro 	case Opt_mode:
3386626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3387626c3920SAl Viro 		break;
3388626c3920SAl Viro 	case Opt_uid:
3389626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3390e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3391626c3920SAl Viro 			goto bad_value;
3392626c3920SAl Viro 		break;
3393626c3920SAl Viro 	case Opt_gid:
3394626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3395e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3396626c3920SAl Viro 			goto bad_value;
3397626c3920SAl Viro 		break;
3398626c3920SAl Viro 	case Opt_huge:
3399626c3920SAl Viro 		ctx->huge = result.uint_32;
3400626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3401396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3402626c3920SAl Viro 		      has_transparent_hugepage()))
3403626c3920SAl Viro 			goto unsupported_parameter;
3404e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3405626c3920SAl Viro 		break;
3406626c3920SAl Viro 	case Opt_mpol:
3407626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3408e04dc423SAl Viro 			mpol_put(ctx->mpol);
3409e04dc423SAl Viro 			ctx->mpol = NULL;
3410626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3411626c3920SAl Viro 				goto bad_value;
3412626c3920SAl Viro 			break;
3413626c3920SAl Viro 		}
3414626c3920SAl Viro 		goto unsupported_parameter;
3415ea3271f7SChris Down 	case Opt_inode32:
3416ea3271f7SChris Down 		ctx->full_inums = false;
3417ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3418ea3271f7SChris Down 		break;
3419ea3271f7SChris Down 	case Opt_inode64:
3420ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3421ea3271f7SChris Down 			return invalfc(fc,
3422ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel\n");
3423ea3271f7SChris Down 		}
3424ea3271f7SChris Down 		ctx->full_inums = true;
3425ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3426ea3271f7SChris Down 		break;
3427e04dc423SAl Viro 	}
3428e04dc423SAl Viro 	return 0;
3429e04dc423SAl Viro 
3430626c3920SAl Viro unsupported_parameter:
3431f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3432626c3920SAl Viro bad_value:
3433f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3434e04dc423SAl Viro }
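
/*
 * Illustrative worked example, not part of this file: "size=50%" on a
 * machine with 1048576 pages of 4KiB each:
 *
 *	size = 50					(memparse stops at '%')
 *	size <<= PAGE_SHIFT				-> 50 * 4096
 *	size *= totalram_pages()			-> 50 * 4096 * 1048576
 *	do_div(size, 100)				-> 2147483648 (2 GiB)
 *	ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE)	-> 524288 pages
 */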
3435e04dc423SAl Viro 
3436f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3437e04dc423SAl Viro {
3438f3235626SDavid Howells 	char *options = data;
3439f3235626SDavid Howells 
344033f37c64SAl Viro 	if (options) {
344133f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
344233f37c64SAl Viro 		if (err)
344333f37c64SAl Viro 			return err;
344433f37c64SAl Viro 	}
344533f37c64SAl Viro 
3446b00dc3adSHugh Dickins 	while (options != NULL) {
3447626c3920SAl Viro 		char *this_char = options;
3448b00dc3adSHugh Dickins 		for (;;) {
3449b00dc3adSHugh Dickins 			/*
3450b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3451b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3452b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3453b00dc3adSHugh Dickins 			 */
3454b00dc3adSHugh Dickins 			options = strchr(options, ',');
3455b00dc3adSHugh Dickins 			if (options == NULL)
3456b00dc3adSHugh Dickins 				break;
3457b00dc3adSHugh Dickins 			options++;
3458b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3459b00dc3adSHugh Dickins 				options[-1] = '\0';
3460b00dc3adSHugh Dickins 				break;
3461b00dc3adSHugh Dickins 			}
3462b00dc3adSHugh Dickins 		}
3463626c3920SAl Viro 		if (*this_char) {
3464626c3920SAl Viro 			char *value = strchr(this_char, '=');
3465f3235626SDavid Howells 			size_t len = 0;
3466626c3920SAl Viro 			int err;
3467626c3920SAl Viro 
3468626c3920SAl Viro 			if (value) {
3469626c3920SAl Viro 				*value++ = '\0';
3470f3235626SDavid Howells 				len = strlen(value);
34711da177e4SLinus Torvalds 			}
3472f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3473f3235626SDavid Howells 			if (err < 0)
3474f3235626SDavid Howells 				return err;
34751da177e4SLinus Torvalds 		}
3476626c3920SAl Viro 	}
34771da177e4SLinus Torvalds 	return 0;
34781da177e4SLinus Torvalds }
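
/*
 * Illustrative note, not part of this file: the isdigit() check above is
 * what keeps an mpol nodelist in one piece.  For "mpol=bind:0,2,size=1g"
 * the comma before '2' is followed by a digit and so is not treated as
 * an option separator; the split happens at the comma before "size",
 * yielding the two options "mpol=bind:0,2" and "size=1g".
 */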
34791da177e4SLinus Torvalds 
3480f3235626SDavid Howells /*
3481f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3482f3235626SDavid Howells  *
3483f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3484f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3485f3235626SDavid Howells  * that case we have no record of how much is already in use.
3486f3235626SDavid Howells  */
3487f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
34881da177e4SLinus Torvalds {
3489f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3490f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
34910edd73b3SHugh Dickins 	unsigned long inodes;
3492*bf11b9a8SSebastian Andrzej Siewior 	struct mempolicy *mpol = NULL;
3493f3235626SDavid Howells 	const char *err;
34940edd73b3SHugh Dickins 
3495*bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock(&sbinfo->stat_lock);
34960edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3497f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3498f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3499f3235626SDavid Howells 			err = "Cannot retroactively limit size";
35000edd73b3SHugh Dickins 			goto out;
35010b5071ddSAl Viro 		}
3502f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3503f3235626SDavid Howells 					   ctx->blocks) > 0) {
3504f3235626SDavid Howells 			err = "Too small a size for current use";
35050b5071ddSAl Viro 			goto out;
3506f3235626SDavid Howells 		}
3507f3235626SDavid Howells 	}
3508f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3509f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3510f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
35110b5071ddSAl Viro 			goto out;
35120b5071ddSAl Viro 		}
3513f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3514f3235626SDavid Howells 			err = "Too few inodes for current use";
3515f3235626SDavid Howells 			goto out;
3516f3235626SDavid Howells 		}
3517f3235626SDavid Howells 	}
35180edd73b3SHugh Dickins 
3519ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3520ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3521ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3522ea3271f7SChris Down 		goto out;
3523ea3271f7SChris Down 	}
3524ea3271f7SChris Down 
3525f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3526f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3527ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3528ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3529f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3530f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3531f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3532f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3533f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
35340b5071ddSAl Viro 	}
353571fe804bSLee Schermerhorn 
35365f00110fSGreg Thelen 	/*
35375f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
35385f00110fSGreg Thelen 	 */
3539f3235626SDavid Howells 	if (ctx->mpol) {
3540*bf11b9a8SSebastian Andrzej Siewior 		mpol = sbinfo->mpol;
3541f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3542f3235626SDavid Howells 		ctx->mpol = NULL;
35435f00110fSGreg Thelen 	}
3544*bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3545*bf11b9a8SSebastian Andrzej Siewior 	mpol_put(mpol);
3546f3235626SDavid Howells 	return 0;
35470edd73b3SHugh Dickins out:
3548*bf11b9a8SSebastian Andrzej Siewior 	raw_spin_unlock(&sbinfo->stat_lock);
3549f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
35501da177e4SLinus Torvalds }
3551680d794bSakpm@linux-foundation.org 
355234c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3553680d794bSakpm@linux-foundation.org {
355434c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3555680d794bSakpm@linux-foundation.org 
3556680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3557680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
355809cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3559680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3560680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
35610825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
356209208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
35638751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
35648751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
35658751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
35668751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
35678751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
35688751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3569ea3271f7SChris Down 
3570ea3271f7SChris Down 	/*
3571ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3572ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3573ea3271f7SChris Down 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
3574ea3271f7SChris Down 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3575ea3271f7SChris Down 	 *
3576ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3577ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3578ea3271f7SChris Down 	 * consideration.
3579ea3271f7SChris Down 	 *
3580ea3271f7SChris Down 	 * As such:
3581ea3271f7SChris Down 	 *
3582ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3583ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3584ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3585ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3586ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3587ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3588ea3271f7SChris Down 	 *
3589ea3271f7SChris Down 	 */
3590ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3591ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3592396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
35935a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
35945a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
35955a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
35965a6e75f8SKirill A. Shutemov #endif
359771fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3598680d794bSakpm@linux-foundation.org 	return 0;
3599680d794bSakpm@linux-foundation.org }
36009183df25SDavid Herrmann 
3601680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
36021da177e4SLinus Torvalds 
36031da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
36041da177e4SLinus Torvalds {
3605602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3606602586a8SHugh Dickins 
3607e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3608602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
360949cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3610602586a8SHugh Dickins 	kfree(sbinfo);
36111da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
36121da177e4SLinus Torvalds }
36131da177e4SLinus Torvalds 
3614f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
36151da177e4SLinus Torvalds {
3616f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
36171da177e4SLinus Torvalds 	struct inode *inode;
36180edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3619680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3620680d794bSakpm@linux-foundation.org 
3621680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3622425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3623680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3624680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3625680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3626680d794bSakpm@linux-foundation.org 
3627680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
36281da177e4SLinus Torvalds 
36290edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
36301da177e4SLinus Torvalds 	/*
36311da177e4SLinus Torvalds 	 * By default we allow only half of the physical RAM per
36321da177e4SLinus Torvalds 	 * tmpfs instance, and limit inodes to one per page of lowmem;
36331da177e4SLinus Torvalds 	 * the internal instance is left unlimited.
36341da177e4SLinus Torvalds 	 */
36351751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3636f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3637f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3638f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3639f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3640ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3641ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3642ca4e0519SAl Viro 	} else {
36431751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
36441da177e4SLinus Torvalds 	}
364591828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
36461751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
36470edd73b3SHugh Dickins #else
36481751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
36490edd73b3SHugh Dickins #endif
3650f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3651f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3652e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3653e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3654e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3655e809d5f0SChris Down 			goto failed;
3656e809d5f0SChris Down 	}
3657f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3658f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3659ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3660f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3661f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3662f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3663f3235626SDavid Howells 	ctx->mpol = NULL;
36641da177e4SLinus Torvalds 
3665*bf11b9a8SSebastian Andrzej Siewior 	raw_spin_lock_init(&sbinfo->stat_lock);
3666908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3667602586a8SHugh Dickins 		goto failed;
3668779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3669779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
36701da177e4SLinus Torvalds 
3671285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
367209cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
367309cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
36741da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
36751da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3676cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3677b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
367839f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3679b09e0fa4SEric Paris #endif
3680b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
36811751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
368239f0247dSAndreas Gruenbacher #endif
36832b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
36840edd73b3SHugh Dickins 
3685454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
36861da177e4SLinus Torvalds 	if (!inode)
36871da177e4SLinus Torvalds 		goto failed;
3688680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3689680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3690318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3691318ceed0SAl Viro 	if (!sb->s_root)
369248fde701SAl Viro 		goto failed;
36931da177e4SLinus Torvalds 	return 0;
36941da177e4SLinus Torvalds 
36951da177e4SLinus Torvalds failed:
36961da177e4SLinus Torvalds 	shmem_put_super(sb);
36971da177e4SLinus Torvalds 	return err;
36981da177e4SLinus Torvalds }
36991da177e4SLinus Torvalds 
3700f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3701f3235626SDavid Howells {
3702f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3703f3235626SDavid Howells }
3704f3235626SDavid Howells 
3705f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3706f3235626SDavid Howells {
3707f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3708f3235626SDavid Howells 
3709f3235626SDavid Howells 	if (ctx) {
3710f3235626SDavid Howells 		mpol_put(ctx->mpol);
3711f3235626SDavid Howells 		kfree(ctx);
3712f3235626SDavid Howells 	}
3713f3235626SDavid Howells }
3714f3235626SDavid Howells 
3715f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3716f3235626SDavid Howells 	.free			= shmem_free_fc,
3717f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3718f3235626SDavid Howells #ifdef CONFIG_TMPFS
3719f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3720f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3721f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3722f3235626SDavid Howells #endif
3723f3235626SDavid Howells };
3724f3235626SDavid Howells 
3725fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
37261da177e4SLinus Torvalds 
37271da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
37281da177e4SLinus Torvalds {
372941ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
373041ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
373141ffe5d5SHugh Dickins 	if (!info)
37321da177e4SLinus Torvalds 		return NULL;
373341ffe5d5SHugh Dickins 	return &info->vfs_inode;
37341da177e4SLinus Torvalds }
37351da177e4SLinus Torvalds 
373674b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3737fa0d7e3dSNick Piggin {
373884e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
37393ed47db3SAl Viro 		kfree(inode->i_link);
3740fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3741fa0d7e3dSNick Piggin }
3742fa0d7e3dSNick Piggin 
37431da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
37441da177e4SLinus Torvalds {
374509208d15SAl Viro 	if (S_ISREG(inode->i_mode))
37461da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
37471da177e4SLinus Torvalds }
37481da177e4SLinus Torvalds 
374941ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
37501da177e4SLinus Torvalds {
375141ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
375241ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
37531da177e4SLinus Torvalds }
37541da177e4SLinus Torvalds 
37559a8ec03eSweiping zhang static void shmem_init_inodecache(void)
37561da177e4SLinus Torvalds {
37571da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
37581da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
37595d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
37601da177e4SLinus Torvalds }
37611da177e4SLinus Torvalds 
376241ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
37631da177e4SLinus Torvalds {
37641a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
37651da177e4SLinus Torvalds }
37661da177e4SLinus Torvalds 
376730e6a51dSHui Su const struct address_space_operations shmem_aops = {
37681da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
376976719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
37701da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3771800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3772800d15a5SNick Piggin 	.write_end	= shmem_write_end,
37731da177e4SLinus Torvalds #endif
37741c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3775304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
37761c93923cSAndrew Morton #endif
3777aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
37781da177e4SLinus Torvalds };
377930e6a51dSHui Su EXPORT_SYMBOL(shmem_aops);
37801da177e4SLinus Torvalds 
378115ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
37821da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3783c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
37841da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3785220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
37862ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
37878174202bSAl Viro 	.write_iter	= generic_file_write_iter,
37881b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
378982c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3790f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
379183e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
37921da177e4SLinus Torvalds #endif
37931da177e4SLinus Torvalds };
37941da177e4SLinus Torvalds 
379592e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
379644a30220SYu Zhao 	.getattr	= shmem_getattr,
379794c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3798b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3799b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3800feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3801b09e0fa4SEric Paris #endif
38021da177e4SLinus Torvalds };
38031da177e4SLinus Torvalds 
380492e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38051da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38061da177e4SLinus Torvalds 	.create		= shmem_create,
38071da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38081da177e4SLinus Torvalds 	.link		= shmem_link,
38091da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
38101da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
38111da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
38121da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
38131da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
38142773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
381560545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
38161da177e4SLinus Torvalds #endif
3817b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3818b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3819b09e0fa4SEric Paris #endif
382039f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
382194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3822feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
382339f0247dSAndreas Gruenbacher #endif
382439f0247dSAndreas Gruenbacher };
382539f0247dSAndreas Gruenbacher 
382692e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3827b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3828b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3829b09e0fa4SEric Paris #endif
383039f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
383194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3832feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
383339f0247dSAndreas Gruenbacher #endif
38341da177e4SLinus Torvalds };
38351da177e4SLinus Torvalds 
3836759b9775SHugh Dickins static const struct super_operations shmem_ops = {
38371da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
383874b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
38391da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
38401da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38411da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3842680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
38431da177e4SLinus Torvalds #endif
38441f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
38451da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
38461da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3847396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3848779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3849779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3850779750d2SKirill A. Shutemov #endif
38511da177e4SLinus Torvalds };
38521da177e4SLinus Torvalds 
3853f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
385454cb8821SNick Piggin 	.fault		= shmem_fault,
3855d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
38561da177e4SLinus Torvalds #ifdef CONFIG_NUMA
38571da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
38581da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
38591da177e4SLinus Torvalds #endif
38601da177e4SLinus Torvalds };
38611da177e4SLinus Torvalds 
3862f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
38631da177e4SLinus Torvalds {
3864f3235626SDavid Howells 	struct shmem_options *ctx;
3865f3235626SDavid Howells 
3866f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3867f3235626SDavid Howells 	if (!ctx)
3868f3235626SDavid Howells 		return -ENOMEM;
3869f3235626SDavid Howells 
3870f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3871f3235626SDavid Howells 	ctx->uid = current_fsuid();
3872f3235626SDavid Howells 	ctx->gid = current_fsgid();
3873f3235626SDavid Howells 
3874f3235626SDavid Howells 	fc->fs_private = ctx;
3875f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3876f3235626SDavid Howells 	return 0;
38771da177e4SLinus Torvalds }
38781da177e4SLinus Torvalds 
387941ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
38801da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
38811da177e4SLinus Torvalds 	.name		= "tmpfs",
3882f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3883f3235626SDavid Howells #ifdef CONFIG_TMPFS
3884d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3885f3235626SDavid Howells #endif
38861da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
388701c70267SMatthew Wilcox (Oracle) 	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
38881da177e4SLinus Torvalds };
38891da177e4SLinus Torvalds 
389041ffe5d5SHugh Dickins int __init shmem_init(void)
38911da177e4SLinus Torvalds {
38921da177e4SLinus Torvalds 	int error;
38931da177e4SLinus Torvalds 
38949a8ec03eSweiping zhang 	shmem_init_inodecache();
38951da177e4SLinus Torvalds 
389641ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
38971da177e4SLinus Torvalds 	if (error) {
38981170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
38991da177e4SLinus Torvalds 		goto out2;
39001da177e4SLinus Torvalds 	}
390195dc112aSGreg Kroah-Hartman 
3902ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39031da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39041da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39051170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39061da177e4SLinus Torvalds 		goto out1;
39071da177e4SLinus Torvalds 	}
39085a6e75f8SKirill A. Shutemov 
3909396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3910435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
39115a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39125a6e75f8SKirill A. Shutemov 	else
39135a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
39145a6e75f8SKirill A. Shutemov #endif
39151da177e4SLinus Torvalds 	return 0;
39161da177e4SLinus Torvalds 
39171da177e4SLinus Torvalds out1:
391841ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
39191da177e4SLinus Torvalds out2:
392041ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
39211da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
39221da177e4SLinus Torvalds 	return error;
39231da177e4SLinus Torvalds }
3924853ac43aSMatt Mackall 
3925396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
39265a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
39275a6e75f8SKirill A. Shutemov 				  struct kobj_attribute *attr, char *buf)
39285a6e75f8SKirill A. Shutemov {
392926083eb6SColin Ian King 	static const int values[] = {
39305a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
39315a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
39325a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
39335a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
39345a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
39355a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
39365a6e75f8SKirill A. Shutemov 	};
393779d4d38aSJoe Perches 	int len = 0;
393879d4d38aSJoe Perches 	int i;
39395a6e75f8SKirill A. Shutemov 
394079d4d38aSJoe Perches 	for (i = 0; i < ARRAY_SIZE(values); i++) {
394179d4d38aSJoe Perches 		len += sysfs_emit_at(buf, len,
394279d4d38aSJoe Perches 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
394379d4d38aSJoe Perches 				     i ? " " : "",
39445a6e75f8SKirill A. Shutemov 				     shmem_format_huge(values[i]));
39455a6e75f8SKirill A. Shutemov 	}
394679d4d38aSJoe Perches 
394779d4d38aSJoe Perches 	len += sysfs_emit_at(buf, len, "\n");
394879d4d38aSJoe Perches 
394979d4d38aSJoe Perches 	return len;
39505a6e75f8SKirill A. Shutemov }
39515a6e75f8SKirill A. Shutemov 
39525a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
39535a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
39545a6e75f8SKirill A. Shutemov {
39555a6e75f8SKirill A. Shutemov 	char tmp[16];
39565a6e75f8SKirill A. Shutemov 	int huge;
39575a6e75f8SKirill A. Shutemov 
39585a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
39595a6e75f8SKirill A. Shutemov 		return -EINVAL;
39605a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
39615a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
39625a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
39635a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
39645a6e75f8SKirill A. Shutemov 
39655a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
39665a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
39675a6e75f8SKirill A. Shutemov 		return -EINVAL;
39685a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
39695a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
39705a6e75f8SKirill A. Shutemov 		return -EINVAL;
39715a6e75f8SKirill A. Shutemov 
39725a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
3973435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
39745a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39755a6e75f8SKirill A. Shutemov 	return count;
39765a6e75f8SKirill A. Shutemov }
39775a6e75f8SKirill A. Shutemov 
39785a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
39795a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3980396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
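
/*
 * Illustrative note, not part of this file: with shmem_huge set to
 * SHMEM_HUGE_NEVER, reading the shmem_enabled attribute through the show
 * routine above yields
 *
 *	always within_size advise [never] deny force
 *
 * and the store routine accepts any one of those words, with or without
 * a trailing newline.
 */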
3981f3f0e1d2SKirill A. Shutemov 
3982396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3983f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
3984f3f0e1d2SKirill A. Shutemov {
3985f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
3986f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3987f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
3988f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
3989f3f0e1d2SKirill A. Shutemov 
3990e6be37b2SMiaohe Lin 	if (!transhuge_vma_enabled(vma, vma->vm_flags))
3991c0630669SYang Shi 		return false;
3992f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
3993f3f0e1d2SKirill A. Shutemov 		return true;
3994f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
3995f3f0e1d2SKirill A. Shutemov 		return false;
3996f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
3997f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
3998f3f0e1d2SKirill A. Shutemov 			return false;
3999f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ALWAYS:
4000f3f0e1d2SKirill A. Shutemov 			return true;
4001f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
4002f3f0e1d2SKirill A. Shutemov 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4003f3f0e1d2SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4004f3f0e1d2SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
4005f3f0e1d2SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
4006f3f0e1d2SKirill A. Shutemov 				return true;
4007e4a9bc58SJoe Perches 			fallthrough;
4008f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
4009f3f0e1d2SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
4010f3f0e1d2SKirill A. Shutemov 			return (vma->vm_flags & VM_HUGEPAGE);
4011f3f0e1d2SKirill A. Shutemov 		default:
4012f3f0e1d2SKirill A. Shutemov 			VM_BUG_ON(1);
4013f3f0e1d2SKirill A. Shutemov 			return false;
4014f3f0e1d2SKirill A. Shutemov 	}
4015f3f0e1d2SKirill A. Shutemov }
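
/*
 * Illustrative worked example for SHMEM_HUGE_WITHIN_SIZE, not part of
 * this file (assumes 2MiB PMD pages): for a mapping at vm_pgoff 0 of a
 * 3MiB file, off = 0 and i_size rounds to 3MiB, so i_size >=
 * HPAGE_PMD_SIZE and i_size >> PAGE_SHIFT (768 pages) >= off: huge pages
 * are allowed.  A mapping whose rounded offset lies beyond EOF fails the
 * test and falls through to the SHMEM_HUGE_ADVISE handling instead.
 */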
4016396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
40175a6e75f8SKirill A. Shutemov 
4018853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4019853ac43aSMatt Mackall 
4020853ac43aSMatt Mackall /*
4021853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4022853ac43aSMatt Mackall  *
4023853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4024853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4025853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
4026853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4027853ac43aSMatt Mackall  */
4028853ac43aSMatt Mackall 
402941ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4030853ac43aSMatt Mackall 	.name		= "tmpfs",
4031f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4032d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
4033853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40342b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4035853ac43aSMatt Mackall };
4036853ac43aSMatt Mackall 
403741ffe5d5SHugh Dickins int __init shmem_init(void)
4038853ac43aSMatt Mackall {
403941ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4040853ac43aSMatt Mackall 
404141ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4042853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4043853ac43aSMatt Mackall 
4044853ac43aSMatt Mackall 	return 0;
4045853ac43aSMatt Mackall }
4046853ac43aSMatt Mackall 
4047b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4048b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
4049853ac43aSMatt Mackall {
4050853ac43aSMatt Mackall 	return 0;
4051853ac43aSMatt Mackall }
4052853ac43aSMatt Mackall 
4053d7c9e99aSAlexey Gladkov int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
40543f96b79aSHugh Dickins {
40553f96b79aSHugh Dickins 	return 0;
40563f96b79aSHugh Dickins }
40573f96b79aSHugh Dickins 
405824513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
405924513264SHugh Dickins {
406024513264SHugh Dickins }
406124513264SHugh Dickins 
4062c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4063c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4064c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4065c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4066c01d5b30SHugh Dickins {
4067c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4068c01d5b30SHugh Dickins }
4069c01d5b30SHugh Dickins #endif
4070c01d5b30SHugh Dickins 
407141ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
407294c1e62dSHugh Dickins {
407341ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
407494c1e62dSHugh Dickins }
407594c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
407694c1e62dSHugh Dickins 
4077853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
40780b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4079454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
40800b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
40810b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4082853ac43aSMatt Mackall 
4083853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4084853ac43aSMatt Mackall 
4085853ac43aSMatt Mackall /* common code */
40861da177e4SLinus Torvalds 
4087703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4088c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
40891da177e4SLinus Torvalds {
40901da177e4SLinus Torvalds 	struct inode *inode;
409193dec2daSAl Viro 	struct file *res;
40921da177e4SLinus Torvalds 
4093703321b6SMatthew Auld 	if (IS_ERR(mnt))
4094703321b6SMatthew Auld 		return ERR_CAST(mnt);
40951da177e4SLinus Torvalds 
4096285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
40971da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
40981da177e4SLinus Torvalds 
40991da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41001da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41011da177e4SLinus Torvalds 
410293dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
410393dec2daSAl Viro 				flags);
4104dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4105dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4106dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4107dac2d1f6SAl Viro 	}
4108c7277090SEric Paris 	inode->i_flags |= i_flags;
41091da177e4SLinus Torvalds 	inode->i_size = size;
41106d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
411126567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
411293dec2daSAl Viro 	if (!IS_ERR(res))
411393dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
41144b42af81SAl Viro 				&shmem_file_operations);
41156b4d0b27SAl Viro 	if (IS_ERR(res))
411693dec2daSAl Viro 		iput(inode);
41176b4d0b27SAl Viro 	return res;
41181da177e4SLinus Torvalds }
4119c7277090SEric Paris 
4120c7277090SEric Paris /**
4121c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4122c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4123c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4124e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4125e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4126c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4127c7277090SEric Paris  * @size: size to be set for the file
4128c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4129c7277090SEric Paris  */
4130c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4131c7277090SEric Paris {
4132703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4133c7277090SEric Paris }
4134c7277090SEric Paris 
4135c7277090SEric Paris /**
4136c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4137c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4138c7277090SEric Paris  * @size: size to be set for the file
4139c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4140c7277090SEric Paris  */
4141c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4142c7277090SEric Paris {
4143703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4144c7277090SEric Paris }
4145395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
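
/*
 * Illustrative sketch, not part of this file (helper name hypothetical,
 * error handling abbreviated): allocating an unlinked 1MiB tmpfs file,
 * much as the SysV shm and memfd paths do.
 */
static int example_make_shmem_file(void)
{
	struct file *file;

	file = shmem_file_setup("example", SZ_1M, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... use file->f_mapping, map it into a task, etc. ... */
	fput(file);
	return 0;
}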
41461da177e4SLinus Torvalds 
414746711810SRandy Dunlap /**
4148703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4149703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4150703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4151703321b6SMatthew Auld  * @size: size to be set for the file
4152703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4153703321b6SMatthew Auld  */
4154703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4155703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4156703321b6SMatthew Auld {
4157703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4158703321b6SMatthew Auld }
4159703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4160703321b6SMatthew Auld 
4161703321b6SMatthew Auld /**
41621da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
416345e55300SPeter Collingbourne  * @vma: the vma to be mmapped is prepared by do_mmap
41641da177e4SLinus Torvalds  */
41651da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41661da177e4SLinus Torvalds {
41671da177e4SLinus Torvalds 	struct file *file;
41681da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
41691da177e4SLinus Torvalds 
417066fc1303SHugh Dickins 	/*
4171c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
417266fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
417366fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
417466fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
417566fc1303SHugh Dickins 	 */
4176703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
41771da177e4SLinus Torvalds 	if (IS_ERR(file))
41781da177e4SLinus Torvalds 		return PTR_ERR(file);
41791da177e4SLinus Torvalds 
41801da177e4SLinus Torvalds 	if (vma->vm_file)
41811da177e4SLinus Torvalds 		fput(vma->vm_file);
41821da177e4SLinus Torvalds 	vma->vm_file = file;
41831da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4184f3f0e1d2SKirill A. Shutemov 
4185396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4186f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4187f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4188f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4189f3f0e1d2SKirill A. Shutemov 	}
4190f3f0e1d2SKirill A. Shutemov 
41911da177e4SLinus Torvalds 	return 0;
41921da177e4SLinus Torvalds }
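
/*
 * Illustrative sketch (userspace, not part of this file): a shared
 * anonymous mapping is the usual way shmem_zero_setup() above is reached
 * via do_mmap().
 */
#include <sys/mman.h>

static void *example_shared_zero(size_t len)
{
	/* MAP_SHARED | MAP_ANONYMOUS yields a "dev/zero" shmem-backed vma. */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}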
4193d9d90e5eSHugh Dickins 
4194d9d90e5eSHugh Dickins /**
4195d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4196d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4197d9d90e5eSHugh Dickins  * @index:	the page index
4198d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4199d9d90e5eSHugh Dickins  *
4200d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4201d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4202d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4203d9d90e5eSHugh Dickins  * suit tmpfs, since tmpfs may have pages in swapcache and needs to find those
4204d9d90e5eSHugh Dickins  * for itself; the drivers/gpu/drm i915 and ttm code rely upon this support.
4205d9d90e5eSHugh Dickins  *
420668da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
420768da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4208d9d90e5eSHugh Dickins  */
4209d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4210d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4211d9d90e5eSHugh Dickins {
421268da9f05SHugh Dickins #ifdef CONFIG_SHMEM
421368da9f05SHugh Dickins 	struct inode *inode = mapping->host;
42149276aad6SHugh Dickins 	struct page *page;
421568da9f05SHugh Dickins 	int error;
421668da9f05SHugh Dickins 
421730e6a51dSHui Su 	BUG_ON(!shmem_mapping(mapping));
42189e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4219cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
422068da9f05SHugh Dickins 	if (error)
422168da9f05SHugh Dickins 		page = ERR_PTR(error);
422268da9f05SHugh Dickins 	else
422368da9f05SHugh Dickins 		unlock_page(page);
422468da9f05SHugh Dickins 	return page;
422568da9f05SHugh Dickins #else
422668da9f05SHugh Dickins 	/*
422768da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
422868da9f05SHugh Dickins 	 */
4229d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
423068da9f05SHugh Dickins #endif
4231d9d90e5eSHugh Dickins }
4232d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
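
/*
 * Illustrative sketch, not part of this file (helper name hypothetical):
 * how a GEM-style driver might populate an object from its shmem
 * mapping, mixing in __GFP_NORETRY | __GFP_NOWARN as described above.
 */
static struct page *example_pin_page(struct address_space *mapping,
				     pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}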
4233