xref: /openbmc/linux/mm/shmem.c (revision 51b0bff2f703f7ecfeb228eaa3d8f6090c18c9c1)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Resizable virtual memory filesystem for Linux.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2000 Linus Torvalds.
51da177e4SLinus Torvalds  *		 2000 Transmeta Corp.
61da177e4SLinus Torvalds  *		 2000-2001 Christoph Rohland
71da177e4SLinus Torvalds  *		 2000-2001 SAP AG
81da177e4SLinus Torvalds  *		 2002 Red Hat Inc.
96922c0c7SHugh Dickins  * Copyright (C) 2002-2011 Hugh Dickins.
106922c0c7SHugh Dickins  * Copyright (C) 2011 Google Inc.
110edd73b3SHugh Dickins  * Copyright (C) 2002-2005 VERITAS Software Corporation.
121da177e4SLinus Torvalds  * Copyright (C) 2004 Andi Kleen, SuSE Labs
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Extended attribute support for tmpfs:
151da177e4SLinus Torvalds  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
161da177e4SLinus Torvalds  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
171da177e4SLinus Torvalds  *
18853ac43aSMatt Mackall  * tiny-shmem:
19853ac43aSMatt Mackall  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20853ac43aSMatt Mackall  *
211da177e4SLinus Torvalds  * This file is released under the GPL.
221da177e4SLinus Torvalds  */
231da177e4SLinus Torvalds 
24853ac43aSMatt Mackall #include <linux/fs.h>
25853ac43aSMatt Mackall #include <linux/init.h>
26853ac43aSMatt Mackall #include <linux/vfs.h>
27853ac43aSMatt Mackall #include <linux/mount.h>
28250297edSAndrew Morton #include <linux/ramfs.h>
29caefba17SHugh Dickins #include <linux/pagemap.h>
30853ac43aSMatt Mackall #include <linux/file.h>
31853ac43aSMatt Mackall #include <linux/mm.h>
3246c9a946SArnd Bergmann #include <linux/random.h>
33174cd4b1SIngo Molnar #include <linux/sched/signal.h>
34b95f1b31SPaul Gortmaker #include <linux/export.h>
35853ac43aSMatt Mackall #include <linux/swap.h>
36e2e40f2cSChristoph Hellwig #include <linux/uio.h>
37f3f0e1d2SKirill A. Shutemov #include <linux/khugepaged.h>
38749df87bSMike Kravetz #include <linux/hugetlb.h>
39b56a2d8aSVineeth Remanan Pillai #include <linux/frontswap.h>
40626c3920SAl Viro #include <linux/fs_parser.h>
41853ac43aSMatt Mackall 
4295cc09d6SAndrea Arcangeli #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
4395cc09d6SAndrea Arcangeli 
44853ac43aSMatt Mackall static struct vfsmount *shm_mnt;
45853ac43aSMatt Mackall 
46853ac43aSMatt Mackall #ifdef CONFIG_SHMEM
471da177e4SLinus Torvalds /*
481da177e4SLinus Torvalds  * This virtual memory filesystem is heavily based on the ramfs. It
491da177e4SLinus Torvalds  * extends ramfs by the ability to use swap and honor resource limits
501da177e4SLinus Torvalds  * which makes it a completely usable filesystem.
511da177e4SLinus Torvalds  */
521da177e4SLinus Torvalds 
5339f0247dSAndreas Gruenbacher #include <linux/xattr.h>
54a5694255SChristoph Hellwig #include <linux/exportfs.h>
551c7c474cSChristoph Hellwig #include <linux/posix_acl.h>
56feda821eSChristoph Hellwig #include <linux/posix_acl_xattr.h>
571da177e4SLinus Torvalds #include <linux/mman.h>
581da177e4SLinus Torvalds #include <linux/string.h>
591da177e4SLinus Torvalds #include <linux/slab.h>
601da177e4SLinus Torvalds #include <linux/backing-dev.h>
611da177e4SLinus Torvalds #include <linux/shmem_fs.h>
621da177e4SLinus Torvalds #include <linux/writeback.h>
631da177e4SLinus Torvalds #include <linux/blkdev.h>
64bda97eabSHugh Dickins #include <linux/pagevec.h>
6541ffe5d5SHugh Dickins #include <linux/percpu_counter.h>
6683e4fa9cSHugh Dickins #include <linux/falloc.h>
67708e3508SHugh Dickins #include <linux/splice.h>
681da177e4SLinus Torvalds #include <linux/security.h>
691da177e4SLinus Torvalds #include <linux/swapops.h>
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/namei.h>
72b00dc3adSHugh Dickins #include <linux/ctype.h>
73304dbdb7SLee Schermerhorn #include <linux/migrate.h>
74c1f60a5aSChristoph Lameter #include <linux/highmem.h>
75680d794bSakpm@linux-foundation.org #include <linux/seq_file.h>
7692562927SMimi Zohar #include <linux/magic.h>
779183df25SDavid Herrmann #include <linux/syscalls.h>
7840e041a2SDavid Herrmann #include <linux/fcntl.h>
799183df25SDavid Herrmann #include <uapi/linux/memfd.h>
80cfda0526SMike Rapoport #include <linux/userfaultfd_k.h>
814c27fe4cSMike Rapoport #include <linux/rmap.h>
822b4db796SAmir Goldstein #include <linux/uuid.h>
83304dbdb7SLee Schermerhorn 
847c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
851da177e4SLinus Torvalds 
86dd56b046SMel Gorman #include "internal.h"
87dd56b046SMel Gorman 
8809cbfeafSKirill A. Shutemov #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
8909cbfeafSKirill A. Shutemov #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
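/*
 * For example, with 4K pages: BLOCKS_PER_PAGE is 4096/512 = 8 (i_blocks
 * is kept in 512-byte units), and VM_ACCT(5000) = PAGE_ALIGN(5000) >>
 * PAGE_SHIFT = 8192 >> 12 = 2 pages charged against overcommit.
 */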
901da177e4SLinus Torvalds 
911da177e4SLinus Torvalds /* Pretend that each entry is of this size in directory's i_size */
921da177e4SLinus Torvalds #define BOGO_DIRENT_SIZE 20
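/*
 * E.g. a freshly created tmpfs directory reports i_size of
 * 2 * BOGO_DIRENT_SIZE = 40 (covering "." and ".."), and each entry
 * added by shmem_mknod()/shmem_link() further down bumps the parent's
 * i_size by another BOGO_DIRENT_SIZE.
 */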
931da177e4SLinus Torvalds 
9469f07ec9SHugh Dickins /* Symlinks up to this size are kmalloc'ed instead of using a swappable page */
9569f07ec9SHugh Dickins #define SHORT_SYMLINK_LEN 128
9669f07ec9SHugh Dickins 
971aac1400SHugh Dickins /*
98f00cdc6dSHugh Dickins  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99f00cdc6dSHugh Dickins  * inode->i_private (with i_mutex making sure that it has only one user at
100f00cdc6dSHugh Dickins  * a time): we would prefer not to enlarge the shmem inode just for that.
1011aac1400SHugh Dickins  */
1021aac1400SHugh Dickins struct shmem_falloc {
1038e205f77SHugh Dickins 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
1041aac1400SHugh Dickins 	pgoff_t start;		/* start of range currently being fallocated */
1051aac1400SHugh Dickins 	pgoff_t next;		/* the next page offset to be fallocated */
1061aac1400SHugh Dickins 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
1071aac1400SHugh Dickins 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
1081aac1400SHugh Dickins };
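/*
 * A rough sketch of that protocol (shmem_fallocate itself appears
 * further down in this file):
 *
 *	struct shmem_falloc shmem_falloc;	(on shmem_fallocate's stack)
 *	shmem_falloc.start = start;
 *	shmem_falloc.next  = start;
 *	inode->i_private = &shmem_falloc;	(i_mutex held)
 *	... allocate pages, advancing shmem_falloc.next ...
 *	inode->i_private = NULL;
 *
 * shmem_fault and shmem_writepage peek at inode->i_private to see
 * whether the page they are handling falls in a range currently being
 * fallocated, and adjust their behaviour if so.
 */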
1091aac1400SHugh Dickins 
1100b5071ddSAl Viro struct shmem_options {
1110b5071ddSAl Viro 	unsigned long long blocks;
1120b5071ddSAl Viro 	unsigned long long inodes;
1130b5071ddSAl Viro 	struct mempolicy *mpol;
1140b5071ddSAl Viro 	kuid_t uid;
1150b5071ddSAl Viro 	kgid_t gid;
1160b5071ddSAl Viro 	umode_t mode;
117ea3271f7SChris Down 	bool full_inums;
1180b5071ddSAl Viro 	int huge;
1190b5071ddSAl Viro 	int seen;
1200b5071ddSAl Viro #define SHMEM_SEEN_BLOCKS 1
1210b5071ddSAl Viro #define SHMEM_SEEN_INODES 2
1220b5071ddSAl Viro #define SHMEM_SEEN_HUGE 4
123ea3271f7SChris Down #define SHMEM_SEEN_INUMS 8
1240b5071ddSAl Viro };
1250b5071ddSAl Viro 
126b76db735SAndrew Morton #ifdef CONFIG_TMPFS
127680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_blocks(void)
128680d794bSakpm@linux-foundation.org {
129ca79b0c2SArun KS 	return totalram_pages() / 2;
130680d794bSakpm@linux-foundation.org }
131680d794bSakpm@linux-foundation.org 
132680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_inodes(void)
133680d794bSakpm@linux-foundation.org {
134ca79b0c2SArun KS 	unsigned long nr_pages = totalram_pages();
135ca79b0c2SArun KS 
136ca79b0c2SArun KS 	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
137680d794bSakpm@linux-foundation.org }
138b76db735SAndrew Morton #endif
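/*
 * E.g. with 8GiB of RAM and 4K pages (totalram_pages() == 2097152),
 * these defaults come out as 1048576 blocks (a 4GiB "size") and, with
 * no highmem, 1048576 inodes.
 */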
139680d794bSakpm@linux-foundation.org 
140bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
142bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index);
143c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
144c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
145c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
146c5bf121eSVineeth Remanan Pillai 			     vm_fault_t *fault_type);
14768da9f05SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1489e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp,
149cfda0526SMike Rapoport 		gfp_t gfp, struct vm_area_struct *vma,
1502b740303SSouptick Joarder 		struct vm_fault *vmf, vm_fault_t *fault_type);
15168da9f05SHugh Dickins 
152f3f0e1d2SKirill A. Shutemov int shmem_getpage(struct inode *inode, pgoff_t index,
1539e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp)
15468da9f05SHugh Dickins {
15568da9f05SHugh Dickins 	return shmem_getpage_gfp(inode, index, pagep, sgp,
156cfda0526SMike Rapoport 		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
15768da9f05SHugh Dickins }
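/*
 * Typical caller pattern, as a sketch (the page comes back locked with
 * a reference held, so the caller must unlock and release it):
 *
 *	struct page *page;
 *	int error = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (!error) {
 *		... use the page contents ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */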
1581da177e4SLinus Torvalds 
1591da177e4SLinus Torvalds static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
1601da177e4SLinus Torvalds {
1611da177e4SLinus Torvalds 	return sb->s_fs_info;
1621da177e4SLinus Torvalds }
1631da177e4SLinus Torvalds 
1641da177e4SLinus Torvalds /*
1651da177e4SLinus Torvalds  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
1661da177e4SLinus Torvalds  * for shared memory and for shared anonymous (/dev/zero) mappings
1671da177e4SLinus Torvalds  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
1681da177e4SLinus Torvalds  * consistent with the pre-accounting of private mappings ...
1691da177e4SLinus Torvalds  */
1701da177e4SLinus Torvalds static inline int shmem_acct_size(unsigned long flags, loff_t size)
1711da177e4SLinus Torvalds {
1720b0a0806SHugh Dickins 	return (flags & VM_NORESERVE) ?
173191c5424SAl Viro 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
1741da177e4SLinus Torvalds }
1751da177e4SLinus Torvalds 
1761da177e4SLinus Torvalds static inline void shmem_unacct_size(unsigned long flags, loff_t size)
1771da177e4SLinus Torvalds {
1780b0a0806SHugh Dickins 	if (!(flags & VM_NORESERVE))
1791da177e4SLinus Torvalds 		vm_unacct_memory(VM_ACCT(size));
1801da177e4SLinus Torvalds }
1811da177e4SLinus Torvalds 
18277142517SKonstantin Khlebnikov static inline int shmem_reacct_size(unsigned long flags,
18377142517SKonstantin Khlebnikov 		loff_t oldsize, loff_t newsize)
18477142517SKonstantin Khlebnikov {
18577142517SKonstantin Khlebnikov 	if (!(flags & VM_NORESERVE)) {
18677142517SKonstantin Khlebnikov 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
18777142517SKonstantin Khlebnikov 			return security_vm_enough_memory_mm(current->mm,
18877142517SKonstantin Khlebnikov 					VM_ACCT(newsize) - VM_ACCT(oldsize));
18977142517SKonstantin Khlebnikov 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
19077142517SKonstantin Khlebnikov 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
19177142517SKonstantin Khlebnikov 	}
19277142517SKonstantin Khlebnikov 	return 0;
19377142517SKonstantin Khlebnikov }
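/*
 * Worked example with 4K pages: growing a pre-accounted object from
 * 5000 to 20000 bytes charges VM_ACCT(20000) - VM_ACCT(5000) = 5 - 2 =
 * 3 more pages; shrinking back returns those 3 pages.
 */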
19477142517SKonstantin Khlebnikov 
1951da177e4SLinus Torvalds /*
1961da177e4SLinus Torvalds  * ... whereas tmpfs objects are accounted incrementally as
19775edd345SHugh Dickins  * pages are allocated, in order to allow large sparse files.
1981da177e4SLinus Torvalds  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
1991da177e4SLinus Torvalds  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
2001da177e4SLinus Torvalds  */
201800d8c63SKirill A. Shutemov static inline int shmem_acct_block(unsigned long flags, long pages)
2021da177e4SLinus Torvalds {
203800d8c63SKirill A. Shutemov 	if (!(flags & VM_NORESERVE))
204800d8c63SKirill A. Shutemov 		return 0;
205800d8c63SKirill A. Shutemov 
206800d8c63SKirill A. Shutemov 	return security_vm_enough_memory_mm(current->mm,
207800d8c63SKirill A. Shutemov 			pages * VM_ACCT(PAGE_SIZE));
2081da177e4SLinus Torvalds }
2091da177e4SLinus Torvalds 
2101da177e4SLinus Torvalds static inline void shmem_unacct_blocks(unsigned long flags, long pages)
2111da177e4SLinus Torvalds {
2120b0a0806SHugh Dickins 	if (flags & VM_NORESERVE)
21309cbfeafSKirill A. Shutemov 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
2141da177e4SLinus Torvalds }
2151da177e4SLinus Torvalds 
2160f079694SMike Rapoport static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
2170f079694SMike Rapoport {
2180f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2190f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2200f079694SMike Rapoport 
2210f079694SMike Rapoport 	if (shmem_acct_block(info->flags, pages))
2220f079694SMike Rapoport 		return false;
2230f079694SMike Rapoport 
2240f079694SMike Rapoport 	if (sbinfo->max_blocks) {
2250f079694SMike Rapoport 		if (percpu_counter_compare(&sbinfo->used_blocks,
2260f079694SMike Rapoport 					   sbinfo->max_blocks - pages) > 0)
2270f079694SMike Rapoport 			goto unacct;
2280f079694SMike Rapoport 		percpu_counter_add(&sbinfo->used_blocks, pages);
2290f079694SMike Rapoport 	}
2300f079694SMike Rapoport 
2310f079694SMike Rapoport 	return true;
2320f079694SMike Rapoport 
2330f079694SMike Rapoport unacct:
2340f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2350f079694SMike Rapoport 	return false;
2360f079694SMike Rapoport }
2370f079694SMike Rapoport 
2380f079694SMike Rapoport static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
2390f079694SMike Rapoport {
2400f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2410f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2420f079694SMike Rapoport 
2430f079694SMike Rapoport 	if (sbinfo->max_blocks)
2440f079694SMike Rapoport 		percpu_counter_sub(&sbinfo->used_blocks, pages);
2450f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2460f079694SMike Rapoport }
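/*
 * The pair above is used to bracket allocations, roughly as in
 * shmem_alloc_and_acct_page() later in this file:
 *
 *	if (!shmem_inode_acct_block(inode, nr))
 *		return ERR_PTR(-ENOSPC);
 *	page = ... allocate nr pages ...;
 *	if (!page) {
 *		shmem_inode_unacct_blocks(inode, nr);
 *		return ERR_PTR(-ENOMEM);
 *	}
 */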
2470f079694SMike Rapoport 
248759b9775SHugh Dickins static const struct super_operations shmem_ops;
249f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops;
25015ad7cdcSHelge Deller static const struct file_operations shmem_file_operations;
25192e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations;
25292e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations;
25392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations;
254f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops;
255779750d2SKirill A. Shutemov static struct file_system_type shmem_fs_type;
2561da177e4SLinus Torvalds 
257b0506e48SMike Rapoport bool vma_is_shmem(struct vm_area_struct *vma)
258b0506e48SMike Rapoport {
259b0506e48SMike Rapoport 	return vma->vm_ops == &shmem_vm_ops;
260b0506e48SMike Rapoport }
261b0506e48SMike Rapoport 
2621da177e4SLinus Torvalds static LIST_HEAD(shmem_swaplist);
263cb5f7b9aSHugh Dickins static DEFINE_MUTEX(shmem_swaplist_mutex);
2641da177e4SLinus Torvalds 
265e809d5f0SChris Down /*
266e809d5f0SChris Down  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
267e809d5f0SChris Down  * produces a novel ino for the newly allocated inode.
268e809d5f0SChris Down  *
269e809d5f0SChris Down  * It may also be called when making a hard link to reserve the space needed by
270e809d5f0SChris Down  * each dentry. However, in that case, no new inode number is needed since that
271e809d5f0SChris Down  * internally draws from another pool of inode numbers (currently global
272e809d5f0SChris Down  * get_next_ino()). This case is indicated by passing NULL as inop.
273e809d5f0SChris Down  */
274e809d5f0SChris Down #define SHMEM_INO_BATCH 1024
275e809d5f0SChris Down static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
2765b04c689SPavel Emelyanov {
2775b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
278e809d5f0SChris Down 	ino_t ino;
279e809d5f0SChris Down 
280e809d5f0SChris Down 	if (!(sb->s_flags & SB_KERNMOUNT)) {
2815b04c689SPavel Emelyanov 		spin_lock(&sbinfo->stat_lock);
2825b04c689SPavel Emelyanov 		if (!sbinfo->free_inodes) {
2835b04c689SPavel Emelyanov 			spin_unlock(&sbinfo->stat_lock);
2845b04c689SPavel Emelyanov 			return -ENOSPC;
2855b04c689SPavel Emelyanov 		}
2865b04c689SPavel Emelyanov 		sbinfo->free_inodes--;
287e809d5f0SChris Down 		if (inop) {
288e809d5f0SChris Down 			ino = sbinfo->next_ino++;
289e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
290e809d5f0SChris Down 				ino = sbinfo->next_ino++;
291ea3271f7SChris Down 			if (unlikely(!sbinfo->full_inums &&
292ea3271f7SChris Down 				     ino > UINT_MAX)) {
293e809d5f0SChris Down 				/*
294e809d5f0SChris Down 				 * Emulate get_next_ino uint wraparound for
295e809d5f0SChris Down 				 * compatibility
296e809d5f0SChris Down 				 */
297ea3271f7SChris Down 				if (IS_ENABLED(CONFIG_64BIT))
298ea3271f7SChris Down 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
299ea3271f7SChris Down 						__func__, MINOR(sb->s_dev));
300ea3271f7SChris Down 				sbinfo->next_ino = 1;
301ea3271f7SChris Down 				ino = sbinfo->next_ino++;
3025b04c689SPavel Emelyanov 			}
303e809d5f0SChris Down 			*inop = ino;
304e809d5f0SChris Down 		}
305e809d5f0SChris Down 		spin_unlock(&sbinfo->stat_lock);
306e809d5f0SChris Down 	} else if (inop) {
307e809d5f0SChris Down 		/*
308e809d5f0SChris Down 		 * __shmem_file_setup, one of our callers, is lock-free: it
309e809d5f0SChris Down 		 * doesn't hold stat_lock in shmem_reserve_inode since
310e809d5f0SChris Down 		 * max_inodes is always 0, and is called from potentially
311e809d5f0SChris Down 		 * unknown contexts. As such, use a per-cpu batched allocator
312e809d5f0SChris Down 		 * which doesn't require the per-sb stat_lock unless we are at
313e809d5f0SChris Down 		 * the batch boundary.
314ea3271f7SChris Down 		 *
315ea3271f7SChris Down 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
316ea3271f7SChris Down 		 * shmem mounts are not exposed to userspace, so we don't need
317ea3271f7SChris Down 		 * to worry about things like glibc compatibility.
318e809d5f0SChris Down 		 */
319e809d5f0SChris Down 		ino_t *next_ino;
320e809d5f0SChris Down 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
321e809d5f0SChris Down 		ino = *next_ino;
322e809d5f0SChris Down 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
323e809d5f0SChris Down 			spin_lock(&sbinfo->stat_lock);
324e809d5f0SChris Down 			ino = sbinfo->next_ino;
325e809d5f0SChris Down 			sbinfo->next_ino += SHMEM_INO_BATCH;
326e809d5f0SChris Down 			spin_unlock(&sbinfo->stat_lock);
327e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
328e809d5f0SChris Down 				ino++;
329e809d5f0SChris Down 		}
330e809d5f0SChris Down 		*inop = ino;
331e809d5f0SChris Down 		*next_ino = ++ino;
332e809d5f0SChris Down 		put_cpu();
333e809d5f0SChris Down 	}
334e809d5f0SChris Down 
3355b04c689SPavel Emelyanov 	return 0;
3365b04c689SPavel Emelyanov }
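/*
 * E.g. with SHMEM_INO_BATCH == 1024 on an SB_KERNMOUNT: the first
 * allocation on a CPU sees *next_ino == 0 (0 % 1024 == 0) and grabs a
 * fresh batch of 1024 numbers under stat_lock; the following 1023
 * allocations on that CPU touch only the per-cpu counter.
 */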
3375b04c689SPavel Emelyanov 
3385b04c689SPavel Emelyanov static void shmem_free_inode(struct super_block *sb)
3395b04c689SPavel Emelyanov {
3405b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3415b04c689SPavel Emelyanov 	if (sbinfo->max_inodes) {
3425b04c689SPavel Emelyanov 		spin_lock(&sbinfo->stat_lock);
3435b04c689SPavel Emelyanov 		sbinfo->free_inodes++;
3445b04c689SPavel Emelyanov 		spin_unlock(&sbinfo->stat_lock);
3455b04c689SPavel Emelyanov 	}
3465b04c689SPavel Emelyanov }
3475b04c689SPavel Emelyanov 
34846711810SRandy Dunlap /**
34941ffe5d5SHugh Dickins  * shmem_recalc_inode - recalculate the block usage of an inode
3501da177e4SLinus Torvalds  * @inode: inode to recalc
3511da177e4SLinus Torvalds  *
3521da177e4SLinus Torvalds  * We have to calculate the free blocks since the mm can drop
3531da177e4SLinus Torvalds  * undirtied hole pages behind our back.
3541da177e4SLinus Torvalds  *
3551da177e4SLinus Torvalds  * But normally info->alloced == inode->i_mapping->nrpages + info->swapped,
3561da177e4SLinus Torvalds  * so what the mm has freed is info->alloced - (inode->i_mapping->nrpages + info->swapped).
3571da177e4SLinus Torvalds  *
3581da177e4SLinus Torvalds  * It has to be called with the spinlock held.
3591da177e4SLinus Torvalds  */
3601da177e4SLinus Torvalds static void shmem_recalc_inode(struct inode *inode)
3611da177e4SLinus Torvalds {
3621da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
3631da177e4SLinus Torvalds 	long freed;
3641da177e4SLinus Torvalds 
3651da177e4SLinus Torvalds 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
3661da177e4SLinus Torvalds 	if (freed > 0) {
3671da177e4SLinus Torvalds 		info->alloced -= freed;
36854af6042SHugh Dickins 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
3690f079694SMike Rapoport 		shmem_inode_unacct_blocks(inode, freed);
3701da177e4SLinus Torvalds 	}
3711da177e4SLinus Torvalds }
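/*
 * Worked example: with info->alloced == 10, info->swapped == 2 and
 * nrpages == 6, freed == 2: alloced drops to 8, i_blocks drops by
 * 2 * BLOCKS_PER_PAGE, and the two dropped hole pages are returned to
 * the block accounting pools.
 */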
3721da177e4SLinus Torvalds 
373800d8c63SKirill A. Shutemov bool shmem_charge(struct inode *inode, long pages)
374800d8c63SKirill A. Shutemov {
375800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3764595ef88SKirill A. Shutemov 	unsigned long flags;
377800d8c63SKirill A. Shutemov 
3780f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, pages))
379800d8c63SKirill A. Shutemov 		return false;
380b1cc94abSMike Rapoport 
381aaa52e34SHugh Dickins 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
382aaa52e34SHugh Dickins 	inode->i_mapping->nrpages += pages;
383aaa52e34SHugh Dickins 
3844595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
385800d8c63SKirill A. Shutemov 	info->alloced += pages;
386800d8c63SKirill A. Shutemov 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
387800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
3884595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
389800d8c63SKirill A. Shutemov 
390800d8c63SKirill A. Shutemov 	return true;
391800d8c63SKirill A. Shutemov }
392800d8c63SKirill A. Shutemov 
393800d8c63SKirill A. Shutemov void shmem_uncharge(struct inode *inode, long pages)
394800d8c63SKirill A. Shutemov {
395800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3964595ef88SKirill A. Shutemov 	unsigned long flags;
397800d8c63SKirill A. Shutemov 
398aaa52e34SHugh Dickins 	/* nrpages adjustment done by __delete_from_page_cache() or caller */
399aaa52e34SHugh Dickins 
4004595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
401800d8c63SKirill A. Shutemov 	info->alloced -= pages;
402800d8c63SKirill A. Shutemov 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
403800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
4044595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
405800d8c63SKirill A. Shutemov 
4060f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, pages);
407800d8c63SKirill A. Shutemov }
408800d8c63SKirill A. Shutemov 
4097a5d0fbbSHugh Dickins /*
41062f945b6SMatthew Wilcox  * Replace item expected in xarray by a new item, while holding xa_lock.
4117a5d0fbbSHugh Dickins  */
41262f945b6SMatthew Wilcox static int shmem_replace_entry(struct address_space *mapping,
4137a5d0fbbSHugh Dickins 			pgoff_t index, void *expected, void *replacement)
4147a5d0fbbSHugh Dickins {
41562f945b6SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, index);
4166dbaf22cSJohannes Weiner 	void *item;
4177a5d0fbbSHugh Dickins 
4187a5d0fbbSHugh Dickins 	VM_BUG_ON(!expected);
4196dbaf22cSJohannes Weiner 	VM_BUG_ON(!replacement);
42062f945b6SMatthew Wilcox 	item = xas_load(&xas);
4217a5d0fbbSHugh Dickins 	if (item != expected)
4227a5d0fbbSHugh Dickins 		return -ENOENT;
42362f945b6SMatthew Wilcox 	xas_store(&xas, replacement);
4247a5d0fbbSHugh Dickins 	return 0;
4257a5d0fbbSHugh Dickins }
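/*
 * E.g. shmem_delete_from_page_cache() below calls
 *	shmem_replace_entry(mapping, page->index, page, radswap)
 * to substitute a swap entry for the page, getting -ENOENT back if a
 * racing thread already changed that slot.
 */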
4267a5d0fbbSHugh Dickins 
4277a5d0fbbSHugh Dickins /*
428d1899228SHugh Dickins  * Sometimes, before we decide whether to proceed or to fail, we must check
429d1899228SHugh Dickins  * that an entry was not already brought back from swap by a racing thread.
430d1899228SHugh Dickins  *
431d1899228SHugh Dickins  * Checking the page is not enough: by the time a SwapCache page is locked, it
432d1899228SHugh Dickins  * might be reused, and again be SwapCache, using the same swap as before.
433d1899228SHugh Dickins  */
434d1899228SHugh Dickins static bool shmem_confirm_swap(struct address_space *mapping,
435d1899228SHugh Dickins 			       pgoff_t index, swp_entry_t swap)
436d1899228SHugh Dickins {
437a12831bfSMatthew Wilcox 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
438d1899228SHugh Dickins }
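/*
 * A condensed sketch of the check in shmem_swapin_page() below, made
 * after locking the swapped-in page:
 *
 *	if (!shmem_confirm_swap(mapping, index, swap))
 *		return -EEXIST;
 *
 * Only then is the page committed back into the page cache.
 */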
439d1899228SHugh Dickins 
440d1899228SHugh Dickins /*
4415a6e75f8SKirill A. Shutemov  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
4425a6e75f8SKirill A. Shutemov  *
4435a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_NEVER:
4445a6e75f8SKirill A. Shutemov  *	disables huge pages for the mount;
4455a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ALWAYS:
4465a6e75f8SKirill A. Shutemov  *	enables huge pages for the mount;
4475a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_WITHIN_SIZE:
4485a6e75f8SKirill A. Shutemov  *	only allocate huge pages if the page will be fully within i_size,
4495a6e75f8SKirill A. Shutemov  *	also respect fadvise()/madvise() hints;
4505a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ADVISE:
4515a6e75f8SKirill A. Shutemov  *	only allocate huge pages if requested with fadvise()/madvise();
4525a6e75f8SKirill A. Shutemov  */
4535a6e75f8SKirill A. Shutemov 
4545a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_NEVER	0
4555a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ALWAYS	1
4565a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_WITHIN_SIZE	2
4575a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ADVISE	3
4585a6e75f8SKirill A. Shutemov 
4595a6e75f8SKirill A. Shutemov /*
4605a6e75f8SKirill A. Shutemov  * Special values.
4615a6e75f8SKirill A. Shutemov  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
4625a6e75f8SKirill A. Shutemov  *
4635a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_DENY:
4645a6e75f8SKirill A. Shutemov  *	disables huge on shm_mnt and all mounts, for emergency use;
4655a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_FORCE:
4665a6e75f8SKirill A. Shutemov  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
4675a6e75f8SKirill A. Shutemov  *
4685a6e75f8SKirill A. Shutemov  */
4695a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_DENY		(-1)
4705a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_FORCE	(-2)
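/*
 * For example:
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * selects SHMEM_HUGE_WITHIN_SIZE for that one mount, while
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * imposes SHMEM_HUGE_FORCE globally.
 */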
4715a6e75f8SKirill A. Shutemov 
472396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4735a6e75f8SKirill A. Shutemov /* ifdef here to avoid bloating shmem.o when not necessary */
4745a6e75f8SKirill A. Shutemov 
4755b9c98f3SMike Kravetz static int shmem_huge __read_mostly;
4765a6e75f8SKirill A. Shutemov 
477e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS)
4785a6e75f8SKirill A. Shutemov static int shmem_parse_huge(const char *str)
4795a6e75f8SKirill A. Shutemov {
4805a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "never"))
4815a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_NEVER;
4825a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "always"))
4835a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ALWAYS;
4845a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "within_size"))
4855a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_WITHIN_SIZE;
4865a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "advise"))
4875a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ADVISE;
4885a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "deny"))
4895a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_DENY;
4905a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "force"))
4915a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_FORCE;
4925a6e75f8SKirill A. Shutemov 	return -EINVAL;
4935a6e75f8SKirill A. Shutemov }
494e5f2249aSArnd Bergmann #endif
4955a6e75f8SKirill A. Shutemov 
496e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
4975a6e75f8SKirill A. Shutemov static const char *shmem_format_huge(int huge)
4985a6e75f8SKirill A. Shutemov {
4995a6e75f8SKirill A. Shutemov 	switch (huge) {
5005a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
5015a6e75f8SKirill A. Shutemov 		return "never";
5025a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ALWAYS:
5035a6e75f8SKirill A. Shutemov 		return "always";
5045a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_WITHIN_SIZE:
5055a6e75f8SKirill A. Shutemov 		return "within_size";
5065a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
5075a6e75f8SKirill A. Shutemov 		return "advise";
5085a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_DENY:
5095a6e75f8SKirill A. Shutemov 		return "deny";
5105a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_FORCE:
5115a6e75f8SKirill A. Shutemov 		return "force";
5125a6e75f8SKirill A. Shutemov 	default:
5135a6e75f8SKirill A. Shutemov 		VM_BUG_ON(1);
5145a6e75f8SKirill A. Shutemov 		return "bad_val";
5155a6e75f8SKirill A. Shutemov 	}
5165a6e75f8SKirill A. Shutemov }
517f1f5929cSJérémy Lefaure #endif
5185a6e75f8SKirill A. Shutemov 
519779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
520779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
521779750d2SKirill A. Shutemov {
522779750d2SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
523253fd0f0SKirill A. Shutemov 	LIST_HEAD(to_remove);
524779750d2SKirill A. Shutemov 	struct inode *inode;
525779750d2SKirill A. Shutemov 	struct shmem_inode_info *info;
526779750d2SKirill A. Shutemov 	struct page *page;
527779750d2SKirill A. Shutemov 	unsigned long batch = sc ? sc->nr_to_scan : 128;
528779750d2SKirill A. Shutemov 	int removed = 0, split = 0;
529779750d2SKirill A. Shutemov 
530779750d2SKirill A. Shutemov 	if (list_empty(&sbinfo->shrinklist))
531779750d2SKirill A. Shutemov 		return SHRINK_STOP;
532779750d2SKirill A. Shutemov 
533779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
534779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
535779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
536779750d2SKirill A. Shutemov 
537779750d2SKirill A. Shutemov 		/* pin the inode */
538779750d2SKirill A. Shutemov 		inode = igrab(&info->vfs_inode);
539779750d2SKirill A. Shutemov 
540779750d2SKirill A. Shutemov 		/* inode is about to be evicted */
541779750d2SKirill A. Shutemov 		if (!inode) {
542779750d2SKirill A. Shutemov 			list_del_init(&info->shrinklist);
543779750d2SKirill A. Shutemov 			removed++;
544779750d2SKirill A. Shutemov 			goto next;
545779750d2SKirill A. Shutemov 		}
546779750d2SKirill A. Shutemov 
547779750d2SKirill A. Shutemov 		/* Check if there's anything to gain */
548779750d2SKirill A. Shutemov 		if (round_up(inode->i_size, PAGE_SIZE) ==
549779750d2SKirill A. Shutemov 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
550253fd0f0SKirill A. Shutemov 			list_move(&info->shrinklist, &to_remove);
551779750d2SKirill A. Shutemov 			removed++;
552779750d2SKirill A. Shutemov 			goto next;
553779750d2SKirill A. Shutemov 		}
554779750d2SKirill A. Shutemov 
555779750d2SKirill A. Shutemov 		list_move(&info->shrinklist, &list);
556779750d2SKirill A. Shutemov next:
557779750d2SKirill A. Shutemov 		if (!--batch)
558779750d2SKirill A. Shutemov 			break;
559779750d2SKirill A. Shutemov 	}
560779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
561779750d2SKirill A. Shutemov 
562253fd0f0SKirill A. Shutemov 	list_for_each_safe(pos, next, &to_remove) {
563253fd0f0SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
564253fd0f0SKirill A. Shutemov 		inode = &info->vfs_inode;
565253fd0f0SKirill A. Shutemov 		list_del_init(&info->shrinklist);
566253fd0f0SKirill A. Shutemov 		iput(inode);
567253fd0f0SKirill A. Shutemov 	}
568253fd0f0SKirill A. Shutemov 
569779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
570779750d2SKirill A. Shutemov 		int ret;
571779750d2SKirill A. Shutemov 
572779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
573779750d2SKirill A. Shutemov 		inode = &info->vfs_inode;
574779750d2SKirill A. Shutemov 
575b3cd54b2SKirill A. Shutemov 		if (nr_to_split && split >= nr_to_split)
576b3cd54b2SKirill A. Shutemov 			goto leave;
577779750d2SKirill A. Shutemov 
578b3cd54b2SKirill A. Shutemov 		page = find_get_page(inode->i_mapping,
579779750d2SKirill A. Shutemov 				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
580779750d2SKirill A. Shutemov 		if (!page)
581779750d2SKirill A. Shutemov 			goto drop;
582779750d2SKirill A. Shutemov 
583b3cd54b2SKirill A. Shutemov 		/* No huge page at the end of the file: nothing to split */
584779750d2SKirill A. Shutemov 		if (!PageTransHuge(page)) {
585779750d2SKirill A. Shutemov 			put_page(page);
586779750d2SKirill A. Shutemov 			goto drop;
587779750d2SKirill A. Shutemov 		}
588779750d2SKirill A. Shutemov 
589b3cd54b2SKirill A. Shutemov 		/*
590b3cd54b2SKirill A. Shutemov 		 * Leave the inode on the list if we failed to lock
591b3cd54b2SKirill A. Shutemov 		 * the page at this time.
592b3cd54b2SKirill A. Shutemov 		 *
593b3cd54b2SKirill A. Shutemov 		 * Waiting for the lock may lead to deadlock in the
594b3cd54b2SKirill A. Shutemov 		 * reclaim path.
595b3cd54b2SKirill A. Shutemov 		 */
596b3cd54b2SKirill A. Shutemov 		if (!trylock_page(page)) {
597b3cd54b2SKirill A. Shutemov 			put_page(page);
598b3cd54b2SKirill A. Shutemov 			goto leave;
599b3cd54b2SKirill A. Shutemov 		}
600b3cd54b2SKirill A. Shutemov 
601779750d2SKirill A. Shutemov 		ret = split_huge_page(page);
602779750d2SKirill A. Shutemov 		unlock_page(page);
603779750d2SKirill A. Shutemov 		put_page(page);
604779750d2SKirill A. Shutemov 
605b3cd54b2SKirill A. Shutemov 		/* If split failed leave the inode on the list */
606b3cd54b2SKirill A. Shutemov 		if (ret)
607b3cd54b2SKirill A. Shutemov 			goto leave;
608779750d2SKirill A. Shutemov 
609779750d2SKirill A. Shutemov 		split++;
610779750d2SKirill A. Shutemov drop:
611779750d2SKirill A. Shutemov 		list_del_init(&info->shrinklist);
612779750d2SKirill A. Shutemov 		removed++;
613b3cd54b2SKirill A. Shutemov leave:
614779750d2SKirill A. Shutemov 		iput(inode);
615779750d2SKirill A. Shutemov 	}
616779750d2SKirill A. Shutemov 
617779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
618779750d2SKirill A. Shutemov 	list_splice_tail(&list, &sbinfo->shrinklist);
619779750d2SKirill A. Shutemov 	sbinfo->shrinklist_len -= removed;
620779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
621779750d2SKirill A. Shutemov 
622779750d2SKirill A. Shutemov 	return split;
623779750d2SKirill A. Shutemov }
624779750d2SKirill A. Shutemov 
625779750d2SKirill A. Shutemov static long shmem_unused_huge_scan(struct super_block *sb,
626779750d2SKirill A. Shutemov 		struct shrink_control *sc)
627779750d2SKirill A. Shutemov {
628779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
629779750d2SKirill A. Shutemov 
630779750d2SKirill A. Shutemov 	if (!READ_ONCE(sbinfo->shrinklist_len))
631779750d2SKirill A. Shutemov 		return SHRINK_STOP;
632779750d2SKirill A. Shutemov 
633779750d2SKirill A. Shutemov 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
634779750d2SKirill A. Shutemov }
635779750d2SKirill A. Shutemov 
636779750d2SKirill A. Shutemov static long shmem_unused_huge_count(struct super_block *sb,
637779750d2SKirill A. Shutemov 		struct shrink_control *sc)
638779750d2SKirill A. Shutemov {
639779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
640779750d2SKirill A. Shutemov 	return READ_ONCE(sbinfo->shrinklist_len);
641779750d2SKirill A. Shutemov }
642396bcc52SMatthew Wilcox (Oracle) #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
6435a6e75f8SKirill A. Shutemov 
6445a6e75f8SKirill A. Shutemov #define shmem_huge SHMEM_HUGE_DENY
6455a6e75f8SKirill A. Shutemov 
646779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
647779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
648779750d2SKirill A. Shutemov {
649779750d2SKirill A. Shutemov 	return 0;
650779750d2SKirill A. Shutemov }
651396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
6525a6e75f8SKirill A. Shutemov 
65389fdcd26SYang Shi static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
65489fdcd26SYang Shi {
655396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
65689fdcd26SYang Shi 	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
65789fdcd26SYang Shi 	    shmem_huge != SHMEM_HUGE_DENY)
65889fdcd26SYang Shi 		return true;
65989fdcd26SYang Shi 	return false;
66089fdcd26SYang Shi }
66189fdcd26SYang Shi 
6625a6e75f8SKirill A. Shutemov /*
66346f65ec1SHugh Dickins  * Like add_to_page_cache_locked, but error if expected item has gone.
66446f65ec1SHugh Dickins  */
66546f65ec1SHugh Dickins static int shmem_add_to_page_cache(struct page *page,
66646f65ec1SHugh Dickins 				   struct address_space *mapping,
6673fea5a49SJohannes Weiner 				   pgoff_t index, void *expected, gfp_t gfp,
6683fea5a49SJohannes Weiner 				   struct mm_struct *charge_mm)
66946f65ec1SHugh Dickins {
670552446a4SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
671552446a4SMatthew Wilcox 	unsigned long i = 0;
672d8c6546bSMatthew Wilcox (Oracle) 	unsigned long nr = compound_nr(page);
6733fea5a49SJohannes Weiner 	int error;
67446f65ec1SHugh Dickins 
675800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
676800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
677309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageLocked(page), page);
678309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
679800d8c63SKirill A. Shutemov 	VM_BUG_ON(expected && PageTransHuge(page));
68046f65ec1SHugh Dickins 
681800d8c63SKirill A. Shutemov 	page_ref_add(page, nr);
68246f65ec1SHugh Dickins 	page->mapping = mapping;
68346f65ec1SHugh Dickins 	page->index = index;
68446f65ec1SHugh Dickins 
6854c6355b2SJohannes Weiner 	if (!PageSwapCache(page)) {
686d9eb1ea2SJohannes Weiner 		error = mem_cgroup_charge(page, charge_mm, gfp);
6873fea5a49SJohannes Weiner 		if (error) {
6884c6355b2SJohannes Weiner 			if (PageTransHuge(page)) {
6893fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK);
6903fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
6913fea5a49SJohannes Weiner 			}
6923fea5a49SJohannes Weiner 			goto error;
6933fea5a49SJohannes Weiner 		}
6944c6355b2SJohannes Weiner 	}
6953fea5a49SJohannes Weiner 	cgroup_throttle_swaprate(page, gfp);
6963fea5a49SJohannes Weiner 
697552446a4SMatthew Wilcox 	do {
698552446a4SMatthew Wilcox 		void *entry;
699552446a4SMatthew Wilcox 		xas_lock_irq(&xas);
700552446a4SMatthew Wilcox 		entry = xas_find_conflict(&xas);
701552446a4SMatthew Wilcox 		if (entry != expected)
702552446a4SMatthew Wilcox 			xas_set_err(&xas, -EEXIST);
703552446a4SMatthew Wilcox 		xas_create_range(&xas);
704552446a4SMatthew Wilcox 		if (xas_error(&xas))
705552446a4SMatthew Wilcox 			goto unlock;
706552446a4SMatthew Wilcox next:
7074101196bSMatthew Wilcox (Oracle) 		xas_store(&xas, page);
708552446a4SMatthew Wilcox 		if (++i < nr) {
709552446a4SMatthew Wilcox 			xas_next(&xas);
710552446a4SMatthew Wilcox 			goto next;
711552446a4SMatthew Wilcox 		}
712800d8c63SKirill A. Shutemov 		if (PageTransHuge(page)) {
713800d8c63SKirill A. Shutemov 			count_vm_event(THP_FILE_ALLOC);
71411fb9989SMel Gorman 			__inc_node_page_state(page, NR_SHMEM_THPS);
715552446a4SMatthew Wilcox 		}
716552446a4SMatthew Wilcox 		mapping->nrpages += nr;
7170d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
7180d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_SHMEM, nr);
719552446a4SMatthew Wilcox unlock:
720552446a4SMatthew Wilcox 		xas_unlock_irq(&xas);
721552446a4SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
722552446a4SMatthew Wilcox 
723552446a4SMatthew Wilcox 	if (xas_error(&xas)) {
7243fea5a49SJohannes Weiner 		error = xas_error(&xas);
7253fea5a49SJohannes Weiner 		goto error;
72646f65ec1SHugh Dickins 	}
727552446a4SMatthew Wilcox 
728552446a4SMatthew Wilcox 	return 0;
7293fea5a49SJohannes Weiner error:
7303fea5a49SJohannes Weiner 	page->mapping = NULL;
7313fea5a49SJohannes Weiner 	page_ref_sub(page, nr);
7323fea5a49SJohannes Weiner 	return error;
73346f65ec1SHugh Dickins }
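/*
 * E.g. adding a PMD-sized THP (HPAGE_PMD_NR == 512 with 4K pages) at
 * index 1024 stores the same compound page in slots 1024..1535 of the
 * xarray, so a lookup on any subpage index finds it; nrpages and the
 * NR_FILE_PAGES/NR_SHMEM counters all advance by 512 in one go.
 */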
73446f65ec1SHugh Dickins 
73546f65ec1SHugh Dickins /*
7366922c0c7SHugh Dickins  * Like delete_from_page_cache, but substitutes swap for page.
7376922c0c7SHugh Dickins  */
7386922c0c7SHugh Dickins static void shmem_delete_from_page_cache(struct page *page, void *radswap)
7396922c0c7SHugh Dickins {
7406922c0c7SHugh Dickins 	struct address_space *mapping = page->mapping;
7416922c0c7SHugh Dickins 	int error;
7426922c0c7SHugh Dickins 
743800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
744800d8c63SKirill A. Shutemov 
745b93b0163SMatthew Wilcox 	xa_lock_irq(&mapping->i_pages);
74662f945b6SMatthew Wilcox 	error = shmem_replace_entry(mapping, page->index, page, radswap);
7476922c0c7SHugh Dickins 	page->mapping = NULL;
7486922c0c7SHugh Dickins 	mapping->nrpages--;
7490d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_FILE_PAGES);
7500d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_SHMEM);
751b93b0163SMatthew Wilcox 	xa_unlock_irq(&mapping->i_pages);
75209cbfeafSKirill A. Shutemov 	put_page(page);
7536922c0c7SHugh Dickins 	BUG_ON(error);
7546922c0c7SHugh Dickins }
7556922c0c7SHugh Dickins 
7566922c0c7SHugh Dickins /*
757c121d3bbSMatthew Wilcox  * Remove swap entry from page cache, free the swap and its page cache.
7587a5d0fbbSHugh Dickins  */
7597a5d0fbbSHugh Dickins static int shmem_free_swap(struct address_space *mapping,
7607a5d0fbbSHugh Dickins 			   pgoff_t index, void *radswap)
7617a5d0fbbSHugh Dickins {
7626dbaf22cSJohannes Weiner 	void *old;
7637a5d0fbbSHugh Dickins 
76455f3f7eaSMatthew Wilcox 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
7656dbaf22cSJohannes Weiner 	if (old != radswap)
7666dbaf22cSJohannes Weiner 		return -ENOENT;
7677a5d0fbbSHugh Dickins 	free_swap_and_cache(radix_to_swp_entry(radswap));
7686dbaf22cSJohannes Weiner 	return 0;
7697a5d0fbbSHugh Dickins }
7707a5d0fbbSHugh Dickins 
7717a5d0fbbSHugh Dickins /*
7726a15a370SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
77348131e03SVlastimil Babka  * given offsets are swapped out.
7746a15a370SVlastimil Babka  *
775b93b0163SMatthew Wilcox  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
7766a15a370SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
7776a15a370SVlastimil Babka  */
77848131e03SVlastimil Babka unsigned long shmem_partial_swap_usage(struct address_space *mapping,
77948131e03SVlastimil Babka 						pgoff_t start, pgoff_t end)
7806a15a370SVlastimil Babka {
7817ae3424fSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
7826a15a370SVlastimil Babka 	struct page *page;
78348131e03SVlastimil Babka 	unsigned long swapped = 0;
7846a15a370SVlastimil Babka 
7856a15a370SVlastimil Babka 	rcu_read_lock();
7867ae3424fSMatthew Wilcox 	xas_for_each(&xas, page, end - 1) {
7877ae3424fSMatthew Wilcox 		if (xas_retry(&xas, page))
7882cf938aaSMatthew Wilcox 			continue;
7893159f943SMatthew Wilcox 		if (xa_is_value(page))
7906a15a370SVlastimil Babka 			swapped++;
7916a15a370SVlastimil Babka 
7926a15a370SVlastimil Babka 		if (need_resched()) {
7937ae3424fSMatthew Wilcox 			xas_pause(&xas);
7946a15a370SVlastimil Babka 			cond_resched_rcu();
7956a15a370SVlastimil Babka 		}
7966a15a370SVlastimil Babka 	}
7976a15a370SVlastimil Babka 
7986a15a370SVlastimil Babka 	rcu_read_unlock();
7996a15a370SVlastimil Babka 
8006a15a370SVlastimil Babka 	return swapped << PAGE_SHIFT;
8016a15a370SVlastimil Babka }
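/*
 * E.g. if, within [start, end), only the pages at indices 3 and 7 have
 * been swapped out, the walk above finds two value entries and returns
 * 2 << PAGE_SHIFT bytes (8K with 4K pages).
 */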
8026a15a370SVlastimil Babka 
8036a15a370SVlastimil Babka /*
80448131e03SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
80548131e03SVlastimil Babka  * given vma is swapped out.
80648131e03SVlastimil Babka  *
807b93b0163SMatthew Wilcox  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
80848131e03SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
80948131e03SVlastimil Babka  */
81048131e03SVlastimil Babka unsigned long shmem_swap_usage(struct vm_area_struct *vma)
81148131e03SVlastimil Babka {
81248131e03SVlastimil Babka 	struct inode *inode = file_inode(vma->vm_file);
81348131e03SVlastimil Babka 	struct shmem_inode_info *info = SHMEM_I(inode);
81448131e03SVlastimil Babka 	struct address_space *mapping = inode->i_mapping;
81548131e03SVlastimil Babka 	unsigned long swapped;
81648131e03SVlastimil Babka 
81748131e03SVlastimil Babka 	/* Be careful as we don't hold info->lock */
81848131e03SVlastimil Babka 	swapped = READ_ONCE(info->swapped);
81948131e03SVlastimil Babka 
82048131e03SVlastimil Babka 	/*
82148131e03SVlastimil Babka 	 * The easier cases are when the shmem object has nothing in swap, or
82248131e03SVlastimil Babka 	 * the vma maps it whole. Then we can simply use the stats that we
82348131e03SVlastimil Babka 	 * already track.
82448131e03SVlastimil Babka 	 */
82548131e03SVlastimil Babka 	if (!swapped)
82648131e03SVlastimil Babka 		return 0;
82748131e03SVlastimil Babka 
82848131e03SVlastimil Babka 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
82948131e03SVlastimil Babka 		return swapped << PAGE_SHIFT;
83048131e03SVlastimil Babka 
83148131e03SVlastimil Babka 	/* Here comes the more involved part */
83248131e03SVlastimil Babka 	return shmem_partial_swap_usage(mapping,
83348131e03SVlastimil Babka 			linear_page_index(vma, vma->vm_start),
83448131e03SVlastimil Babka 			linear_page_index(vma, vma->vm_end));
83548131e03SVlastimil Babka }
83648131e03SVlastimil Babka 
83748131e03SVlastimil Babka /*
83824513264SHugh Dickins  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
83924513264SHugh Dickins  */
84024513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
84124513264SHugh Dickins {
84224513264SHugh Dickins 	struct pagevec pvec;
84324513264SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
84424513264SHugh Dickins 	pgoff_t index = 0;
84524513264SHugh Dickins 
84686679820SMel Gorman 	pagevec_init(&pvec);
84724513264SHugh Dickins 	/*
84824513264SHugh Dickins 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
84924513264SHugh Dickins 	 */
85024513264SHugh Dickins 	while (!mapping_unevictable(mapping)) {
85124513264SHugh Dickins 		/*
85224513264SHugh Dickins 		 * Avoid pagevec_lookup(): find_get_pages() returns 0, as if it
85324513264SHugh Dickins 		 * had finished, when it hits a row of PAGEVEC_SIZE swap entries.
85424513264SHugh Dickins 		 */
8550cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
85624513264SHugh Dickins 					   PAGEVEC_SIZE, pvec.pages, indices);
85724513264SHugh Dickins 		if (!pvec.nr)
85824513264SHugh Dickins 			break;
85924513264SHugh Dickins 		index = indices[pvec.nr - 1] + 1;
8600cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
86164e3d12fSKuo-Hsin Yang 		check_move_unevictable_pages(&pvec);
86224513264SHugh Dickins 		pagevec_release(&pvec);
86324513264SHugh Dickins 		cond_resched();
86424513264SHugh Dickins 	}
8657a5d0fbbSHugh Dickins }
8667a5d0fbbSHugh Dickins 
8677a5d0fbbSHugh Dickins /*
86871725ed1SHugh Dickins  * Check whether a hole-punch or truncation needs to split a huge page,
86971725ed1SHugh Dickins  * returning true if no split was required, or the split has been successful.
87071725ed1SHugh Dickins  *
87171725ed1SHugh Dickins  * Eviction (or truncation to 0 size) should never need to split a huge page;
87271725ed1SHugh Dickins  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
87371725ed1SHugh Dickins  * head, and then succeeded to trylock on tail.
87471725ed1SHugh Dickins  *
87571725ed1SHugh Dickins  * A split can only succeed when there are no additional references on the
87671725ed1SHugh Dickins  * huge page: so the split below relies upon find_get_entries() having stopped
87771725ed1SHugh Dickins  * when it found a subpage of the huge page, without getting further references.
87871725ed1SHugh Dickins  */
87971725ed1SHugh Dickins static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
88071725ed1SHugh Dickins {
88171725ed1SHugh Dickins 	if (!PageTransCompound(page))
88271725ed1SHugh Dickins 		return true;
88371725ed1SHugh Dickins 
88471725ed1SHugh Dickins 	/* Just proceed to delete a huge page wholly within the range punched */
88571725ed1SHugh Dickins 	if (PageHead(page) &&
88671725ed1SHugh Dickins 	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
88771725ed1SHugh Dickins 		return true;
88871725ed1SHugh Dickins 
88971725ed1SHugh Dickins 	/* Try to split huge page, so we can truly punch the hole or truncate */
89071725ed1SHugh Dickins 	return split_huge_page(page) >= 0;
89171725ed1SHugh Dickins }
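/*
 * E.g. punching [0, HPAGE_PMD_NR) over a huge page whose head is at
 * index 0 returns true without splitting, since the whole page can be
 * deleted; punching [0, HPAGE_PMD_NR / 2) must split it first, so that
 * only the covered subpages are truncated.
 */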
89271725ed1SHugh Dickins 
89371725ed1SHugh Dickins /*
8947f4446eeSMatthew Wilcox  * Remove range of pages and swap entries from page cache, and free them.
8951635f6a7SHugh Dickins  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
8967a5d0fbbSHugh Dickins  */
8971635f6a7SHugh Dickins static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
8981635f6a7SHugh Dickins 								 bool unfalloc)
8991da177e4SLinus Torvalds {
900285b2c4fSHugh Dickins 	struct address_space *mapping = inode->i_mapping;
9011da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
90209cbfeafSKirill A. Shutemov 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
90309cbfeafSKirill A. Shutemov 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
90409cbfeafSKirill A. Shutemov 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
90509cbfeafSKirill A. Shutemov 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
906bda97eabSHugh Dickins 	struct pagevec pvec;
9077a5d0fbbSHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
9087a5d0fbbSHugh Dickins 	long nr_swaps_freed = 0;
909285b2c4fSHugh Dickins 	pgoff_t index;
910bda97eabSHugh Dickins 	int i;
9111da177e4SLinus Torvalds 
91283e4fa9cSHugh Dickins 	if (lend == -1)
91383e4fa9cSHugh Dickins 		end = -1;	/* unsigned, so actually very big */
914bda97eabSHugh Dickins 
91586679820SMel Gorman 	pagevec_init(&pvec);
916bda97eabSHugh Dickins 	index = start;
91783e4fa9cSHugh Dickins 	while (index < end) {
9180cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
91983e4fa9cSHugh Dickins 			min(end - index, (pgoff_t)PAGEVEC_SIZE),
9207a5d0fbbSHugh Dickins 			pvec.pages, indices);
9217a5d0fbbSHugh Dickins 		if (!pvec.nr)
9227a5d0fbbSHugh Dickins 			break;
923bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
924bda97eabSHugh Dickins 			struct page *page = pvec.pages[i];
925bda97eabSHugh Dickins 
9267a5d0fbbSHugh Dickins 			index = indices[i];
92783e4fa9cSHugh Dickins 			if (index >= end)
928bda97eabSHugh Dickins 				break;
929bda97eabSHugh Dickins 
9303159f943SMatthew Wilcox 			if (xa_is_value(page)) {
9311635f6a7SHugh Dickins 				if (unfalloc)
9321635f6a7SHugh Dickins 					continue;
9337a5d0fbbSHugh Dickins 				nr_swaps_freed += !shmem_free_swap(mapping,
9347a5d0fbbSHugh Dickins 								index, page);
9357a5d0fbbSHugh Dickins 				continue;
9367a5d0fbbSHugh Dickins 			}
9377a5d0fbbSHugh Dickins 
938800d8c63SKirill A. Shutemov 			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
939800d8c63SKirill A. Shutemov 
940bda97eabSHugh Dickins 			if (!trylock_page(page))
941bda97eabSHugh Dickins 				continue;
942800d8c63SKirill A. Shutemov 
94371725ed1SHugh Dickins 			if ((!unfalloc || !PageUptodate(page)) &&
94471725ed1SHugh Dickins 			    page_mapping(page) == mapping) {
945309381feSSasha Levin 				VM_BUG_ON_PAGE(PageWriteback(page), page);
94671725ed1SHugh Dickins 				if (shmem_punch_compound(page, start, end))
947bda97eabSHugh Dickins 					truncate_inode_page(mapping, page);
9487a5d0fbbSHugh Dickins 			}
949bda97eabSHugh Dickins 			unlock_page(page);
950bda97eabSHugh Dickins 		}
9510cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
95224513264SHugh Dickins 		pagevec_release(&pvec);
953bda97eabSHugh Dickins 		cond_resched();
954bda97eabSHugh Dickins 		index++;
955bda97eabSHugh Dickins 	}
956bda97eabSHugh Dickins 
95783e4fa9cSHugh Dickins 	if (partial_start) {
958bda97eabSHugh Dickins 		struct page *page = NULL;
9599e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, start - 1, &page, SGP_READ);
960bda97eabSHugh Dickins 		if (page) {
96109cbfeafSKirill A. Shutemov 			unsigned int top = PAGE_SIZE;
96283e4fa9cSHugh Dickins 			if (start > end) {
96383e4fa9cSHugh Dickins 				top = partial_end;
96483e4fa9cSHugh Dickins 				partial_end = 0;
96583e4fa9cSHugh Dickins 			}
96683e4fa9cSHugh Dickins 			zero_user_segment(page, partial_start, top);
967bda97eabSHugh Dickins 			set_page_dirty(page);
968bda97eabSHugh Dickins 			unlock_page(page);
96909cbfeafSKirill A. Shutemov 			put_page(page);
970bda97eabSHugh Dickins 		}
971bda97eabSHugh Dickins 	}
97283e4fa9cSHugh Dickins 	if (partial_end) {
97383e4fa9cSHugh Dickins 		struct page *page = NULL;
9749e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, end, &page, SGP_READ);
97583e4fa9cSHugh Dickins 		if (page) {
97683e4fa9cSHugh Dickins 			zero_user_segment(page, 0, partial_end);
97783e4fa9cSHugh Dickins 			set_page_dirty(page);
97883e4fa9cSHugh Dickins 			unlock_page(page);
97909cbfeafSKirill A. Shutemov 			put_page(page);
98083e4fa9cSHugh Dickins 		}
98183e4fa9cSHugh Dickins 	}
98283e4fa9cSHugh Dickins 	if (start >= end)
98383e4fa9cSHugh Dickins 		return;
984bda97eabSHugh Dickins 
985bda97eabSHugh Dickins 	index = start;
986b1a36650SHugh Dickins 	while (index < end) {
987bda97eabSHugh Dickins 		cond_resched();
9880cd6144aSJohannes Weiner 
9890cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
99083e4fa9cSHugh Dickins 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
9917a5d0fbbSHugh Dickins 				pvec.pages, indices);
9927a5d0fbbSHugh Dickins 		if (!pvec.nr) {
993b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
994b1a36650SHugh Dickins 			if (index == start || end != -1)
995bda97eabSHugh Dickins 				break;
996b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
997bda97eabSHugh Dickins 			index = start;
998bda97eabSHugh Dickins 			continue;
999bda97eabSHugh Dickins 		}
1000bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
1001bda97eabSHugh Dickins 			struct page *page = pvec.pages[i];
1002bda97eabSHugh Dickins 
10037a5d0fbbSHugh Dickins 			index = indices[i];
100483e4fa9cSHugh Dickins 			if (index >= end)
1005bda97eabSHugh Dickins 				break;
1006bda97eabSHugh Dickins 
10073159f943SMatthew Wilcox 			if (xa_is_value(page)) {
10081635f6a7SHugh Dickins 				if (unfalloc)
10091635f6a7SHugh Dickins 					continue;
1010b1a36650SHugh Dickins 				if (shmem_free_swap(mapping, index, page)) {
1011b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
1012b1a36650SHugh Dickins 					index--;
1013b1a36650SHugh Dickins 					break;
1014b1a36650SHugh Dickins 				}
1015b1a36650SHugh Dickins 				nr_swaps_freed++;
10167a5d0fbbSHugh Dickins 				continue;
10177a5d0fbbSHugh Dickins 			}
10187a5d0fbbSHugh Dickins 
1019bda97eabSHugh Dickins 			lock_page(page);
1020800d8c63SKirill A. Shutemov 
10211635f6a7SHugh Dickins 			if (!unfalloc || !PageUptodate(page)) {
102271725ed1SHugh Dickins 				if (page_mapping(page) != mapping) {
1023b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
1024b1a36650SHugh Dickins 					unlock_page(page);
1025b1a36650SHugh Dickins 					index--;
1026b1a36650SHugh Dickins 					break;
10277a5d0fbbSHugh Dickins 				}
102871725ed1SHugh Dickins 				VM_BUG_ON_PAGE(PageWriteback(page), page);
102971725ed1SHugh Dickins 				if (shmem_punch_compound(page, start, end))
103071725ed1SHugh Dickins 					truncate_inode_page(mapping, page);
10310783ac95SHugh Dickins 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
103271725ed1SHugh Dickins 					/* Wipe the page and don't get stuck */
103371725ed1SHugh Dickins 					clear_highpage(page);
103471725ed1SHugh Dickins 					flush_dcache_page(page);
103571725ed1SHugh Dickins 					set_page_dirty(page);
103671725ed1SHugh Dickins 					if (index <
103771725ed1SHugh Dickins 					    round_up(start, HPAGE_PMD_NR))
103871725ed1SHugh Dickins 						start = index + 1;
103971725ed1SHugh Dickins 				}
10401635f6a7SHugh Dickins 			}
1041bda97eabSHugh Dickins 			unlock_page(page);
1042bda97eabSHugh Dickins 		}
10430cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
104424513264SHugh Dickins 		pagevec_release(&pvec);
1045bda97eabSHugh Dickins 		index++;
1046bda97eabSHugh Dickins 	}
104794c1e62dSHugh Dickins 
10484595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10497a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10501da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10514595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10521635f6a7SHugh Dickins }
10531da177e4SLinus Torvalds 
10541635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10551635f6a7SHugh Dickins {
10561635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1057078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
10581da177e4SLinus Torvalds }
105994c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
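/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * One common userspace path into shmem_truncate_range() is punching a
 * hole in a tmpfs file: shmem_fallocate() handles FALLOC_FL_PUNCH_HOLE
 * by calling shmem_truncate_range() on the punched span. Assuming a
 * file on a tmpfs mount such as /dev/shm (fallocate() needs _GNU_SOURCE
 * and <fcntl.h>):
 *
 *	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 1 << 20);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 64 << 10);
 *
 * The fallocate() call frees the pages (and any swap) backing the first
 * 64KiB while leaving i_size untouched.
 */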
10601da177e4SLinus Torvalds 
1061a528d35eSDavid Howells static int shmem_getattr(const struct path *path, struct kstat *stat,
1062a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
106344a30220SYu Zhao {
1064a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
106544a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
106689fdcd26SYang Shi 	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
106744a30220SYu Zhao 
1068d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10694595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
107044a30220SYu Zhao 		shmem_recalc_inode(inode);
10714595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1072d0424c42SHugh Dickins 	}
107344a30220SYu Zhao 	generic_fillattr(inode, stat);
107489fdcd26SYang Shi 
107589fdcd26SYang Shi 	if (is_huge_enabled(sb_info))
107689fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
107789fdcd26SYang Shi 
107844a30220SYu Zhao 	return 0;
107944a30220SYu Zhao }
108044a30220SYu Zhao 
108194c1e62dSHugh Dickins static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
10821da177e4SLinus Torvalds {
108375c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
108440e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1085779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10861da177e4SLinus Torvalds 	int error;
10871da177e4SLinus Torvalds 
108831051c85SJan Kara 	error = setattr_prepare(dentry, attr);
1089db78b877SChristoph Hellwig 	if (error)
1090db78b877SChristoph Hellwig 		return error;
1091db78b877SChristoph Hellwig 
109294c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
109394c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
109494c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10953889e6e7Snpiggin@suse.de 
109640e041a2SDavid Herrmann 		/* protected by i_mutex */
109740e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
109840e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
109940e041a2SDavid Herrmann 			return -EPERM;
110040e041a2SDavid Herrmann 
110194c1e62dSHugh Dickins 		if (newsize != oldsize) {
110277142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
110377142517SKonstantin Khlebnikov 					oldsize, newsize);
110477142517SKonstantin Khlebnikov 			if (error)
110577142517SKonstantin Khlebnikov 				return error;
110694c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1107078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
110894c1e62dSHugh Dickins 		}
1109afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
111094c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1111d0424c42SHugh Dickins 			if (oldsize > holebegin)
1112d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1113d0424c42SHugh Dickins 							holebegin, 0, 1);
1114d0424c42SHugh Dickins 			if (info->alloced)
1115d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1116d0424c42SHugh Dickins 							newsize, (loff_t)-1);
111794c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1118d0424c42SHugh Dickins 			if (oldsize > holebegin)
1119d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1120d0424c42SHugh Dickins 							holebegin, 0, 1);
1121779750d2SKirill A. Shutemov 
1122779750d2SKirill A. Shutemov 			/*
1123779750d2SKirill A. Shutemov 			 * Part of the huge page can be beyond i_size: subject
1124779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1125779750d2SKirill A. Shutemov 			 */
1126396bcc52SMatthew Wilcox (Oracle) 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1127779750d2SKirill A. Shutemov 				spin_lock(&sbinfo->shrinklist_lock);
1128d041353dSCong Wang 				/*
1129d041353dSCong Wang 				 * list_empty_careful() to defend against unlocked
1130d041353dSCong Wang 				 * access to ->shrink_list in shmem_unused_huge_shrink()
1131d041353dSCong Wang 				 */
1132d041353dSCong Wang 				if (list_empty_careful(&info->shrinklist)) {
1133779750d2SKirill A. Shutemov 					list_add_tail(&info->shrinklist,
1134779750d2SKirill A. Shutemov 							&sbinfo->shrinklist);
1135779750d2SKirill A. Shutemov 					sbinfo->shrinklist_len++;
1136779750d2SKirill A. Shutemov 				}
1137779750d2SKirill A. Shutemov 				spin_unlock(&sbinfo->shrinklist_lock);
1138779750d2SKirill A. Shutemov 			}
113994c1e62dSHugh Dickins 		}
11401da177e4SLinus Torvalds 	}
11411da177e4SLinus Torvalds 
11426a1a90adSChristoph Hellwig 	setattr_copy(inode, attr);
1143db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1144feda821eSChristoph Hellwig 		error = posix_acl_chmod(inode, inode->i_mode);
11451da177e4SLinus Torvalds 	return error;
11461da177e4SLinus Torvalds }
11471da177e4SLinus Torvalds 
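/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * The F_SEAL_SHRINK/F_SEAL_GROW checks in shmem_setattr() above are what
 * memfd sealing relies on: once a size seal is set, ftruncate() in the
 * sealed direction fails with EPERM. A minimal demo, assuming a kernel
 * and libc exposing memfd_create(2) (_GNU_SOURCE, <sys/mman.h> for
 * memfd_create, <fcntl.h> for F_ADD_SEALS and F_SEAL_*):
 *
 *	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 8192);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);
 *	assert(ftruncate(fd, 4096) == -1 && errno == EPERM);
 *	assert(ftruncate(fd, 16384) == -1 && errno == EPERM);
 */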
11481f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11491da177e4SLinus Torvalds {
11501da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1151779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11521da177e4SLinus Torvalds 
11533889e6e7Snpiggin@suse.de 	if (inode->i_mapping->a_ops == &shmem_aops) {
11541da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11551da177e4SLinus Torvalds 		inode->i_size = 0;
11563889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1157779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1158779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1159779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1160779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1161779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1162779750d2SKirill A. Shutemov 			}
1163779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1164779750d2SKirill A. Shutemov 		}
1165af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1166af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1167af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1168af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1169cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1170af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1171af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11721da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1173cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11741da177e4SLinus Torvalds 		}
11753ed47db3SAl Viro 	}
1176b09e0fa4SEric Paris 
117738f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11780f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11795b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1180dbd5768fSJan Kara 	clear_inode(inode);
11811da177e4SLinus Torvalds }
11821da177e4SLinus Torvalds 
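/*
 * Editor's annotation — a sketch of the ->stop_eviction handshake, not
 * part of mm/shmem.c. The wait_var_event() loop above pairs with
 * shmem_unuse() further down, roughly:
 *
 *	shmem_unuse() (swapoff)			shmem_evict_inode()
 *	-----------------------			-------------------
 *	atomic_inc(&info->stop_eviction);	wait_var_event(&info->stop_eviction,
 *	mutex_unlock(&shmem_swaplist_mutex);	    !atomic_read(&info->stop_eviction));
 *	... scan inode for swap entries ...
 *	if (atomic_dec_and_test(&info->stop_eviction))
 *		wake_up_var(&info->stop_eviction);
 *
 * so an inode that swapoff is scanning cannot be freed underneath it,
 * without pinning it the way igrab() would.
 */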
1183b56a2d8aSVineeth Remanan Pillai extern struct swap_info_struct *swap_info[];
1184b56a2d8aSVineeth Remanan Pillai 
1185b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1186b56a2d8aSVineeth Remanan Pillai 				   pgoff_t start, unsigned int nr_entries,
1187b56a2d8aSVineeth Remanan Pillai 				   struct page **entries, pgoff_t *indices,
118887039546SHugh Dickins 				   unsigned int type, bool frontswap)
1189478922e2SMatthew Wilcox {
1190b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1191b56a2d8aSVineeth Remanan Pillai 	struct page *page;
119287039546SHugh Dickins 	swp_entry_t entry;
1193b56a2d8aSVineeth Remanan Pillai 	unsigned int ret = 0;
1194b56a2d8aSVineeth Remanan Pillai 
1195b56a2d8aSVineeth Remanan Pillai 	if (!nr_entries)
1196b56a2d8aSVineeth Remanan Pillai 		return 0;
1197478922e2SMatthew Wilcox 
1198478922e2SMatthew Wilcox 	rcu_read_lock();
1199b56a2d8aSVineeth Remanan Pillai 	xas_for_each(&xas, page, ULONG_MAX) {
1200b56a2d8aSVineeth Remanan Pillai 		if (xas_retry(&xas, page))
12015b9c98f3SMike Kravetz 			continue;
1202b56a2d8aSVineeth Remanan Pillai 
1203b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1204478922e2SMatthew Wilcox 			continue;
1205b56a2d8aSVineeth Remanan Pillai 
120687039546SHugh Dickins 		entry = radix_to_swp_entry(page);
120787039546SHugh Dickins 		if (swp_type(entry) != type)
1208b56a2d8aSVineeth Remanan Pillai 			continue;
120987039546SHugh Dickins 		if (frontswap &&
121087039546SHugh Dickins 		    !frontswap_test(swap_info[type], swp_offset(entry)))
121187039546SHugh Dickins 			continue;
1212b56a2d8aSVineeth Remanan Pillai 
1213b56a2d8aSVineeth Remanan Pillai 		indices[ret] = xas.xa_index;
1214b56a2d8aSVineeth Remanan Pillai 		entries[ret] = page;
1215b56a2d8aSVineeth Remanan Pillai 
1216b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1217e21a2955SMatthew Wilcox 			xas_pause(&xas);
1218478922e2SMatthew Wilcox 			cond_resched_rcu();
1219478922e2SMatthew Wilcox 		}
1220b56a2d8aSVineeth Remanan Pillai 		if (++ret == nr_entries)
1221b56a2d8aSVineeth Remanan Pillai 			break;
1222b56a2d8aSVineeth Remanan Pillai 	}
1223478922e2SMatthew Wilcox 	rcu_read_unlock();
1224e21a2955SMatthew Wilcox 
1225b56a2d8aSVineeth Remanan Pillai 	return ret;
1226b56a2d8aSVineeth Remanan Pillai }
1227b56a2d8aSVineeth Remanan Pillai 
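/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * Swapped-out shmem pages live on in the page cache xarray as "value"
 * entries encoding a swp_entry_t, which is why the walk above keeps
 * only xa_is_value() entries and decodes them with radix_to_swp_entry().
 * The encoding round-trips like so:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	void *cookie = swp_to_radix_entry(entry);
 *
 * xa_is_value(cookie) is then true, and radix_to_swp_entry(cookie)
 * yields back entry.
 */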
1228b56a2d8aSVineeth Remanan Pillai /*
1229b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1230b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1231b56a2d8aSVineeth Remanan Pillai  */
1232b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1233b56a2d8aSVineeth Remanan Pillai 				    pgoff_t *indices)
1234b56a2d8aSVineeth Remanan Pillai {
1235b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1236b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1237b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1238b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1239b56a2d8aSVineeth Remanan Pillai 
1240b56a2d8aSVineeth Remanan Pillai 	for (i = 0; i < pvec.nr; i++) {
1241b56a2d8aSVineeth Remanan Pillai 		struct page *page = pvec.pages[i];
1242b56a2d8aSVineeth Remanan Pillai 
1243b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1244b56a2d8aSVineeth Remanan Pillai 			continue;
1245b56a2d8aSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, indices[i],
1246b56a2d8aSVineeth Remanan Pillai 					  &page, SGP_CACHE,
1247b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1248b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1249b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1250b56a2d8aSVineeth Remanan Pillai 			unlock_page(page);
1251b56a2d8aSVineeth Remanan Pillai 			put_page(page);
1252b56a2d8aSVineeth Remanan Pillai 			ret++;
1253b56a2d8aSVineeth Remanan Pillai 		}
1254b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1255b56a2d8aSVineeth Remanan Pillai 			break;
1256b56a2d8aSVineeth Remanan Pillai 		error = 0;
1257b56a2d8aSVineeth Remanan Pillai 	}
1258b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1259478922e2SMatthew Wilcox }
1260478922e2SMatthew Wilcox 
126146f65ec1SHugh Dickins /*
126246f65ec1SHugh Dickins  * If swap is found in the inode, free it and move the page from swapcache to filecache.
126346f65ec1SHugh Dickins  */
1264b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1265b56a2d8aSVineeth Remanan Pillai 			     bool frontswap, unsigned long *fs_pages_to_unuse)
12661da177e4SLinus Torvalds {
1267b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1268b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1269b56a2d8aSVineeth Remanan Pillai 	struct pagevec pvec;
1270b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1271b56a2d8aSVineeth Remanan Pillai 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1272b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12731da177e4SLinus Torvalds 
1274b56a2d8aSVineeth Remanan Pillai 	pagevec_init(&pvec);
1275b56a2d8aSVineeth Remanan Pillai 	do {
1276b56a2d8aSVineeth Remanan Pillai 		unsigned int nr_entries = PAGEVEC_SIZE;
12772e0e26c7SHugh Dickins 
1278b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1279b56a2d8aSVineeth Remanan Pillai 			nr_entries = *fs_pages_to_unuse;
12802e0e26c7SHugh Dickins 
1281b56a2d8aSVineeth Remanan Pillai 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1282b56a2d8aSVineeth Remanan Pillai 						  pvec.pages, indices,
128387039546SHugh Dickins 						  type, frontswap);
1284b56a2d8aSVineeth Remanan Pillai 		if (pvec.nr == 0) {
1285b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1286778dd893SHugh Dickins 			break;
1287b56a2d8aSVineeth Remanan Pillai 		}
1288b56a2d8aSVineeth Remanan Pillai 
1289b56a2d8aSVineeth Remanan Pillai 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1290b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1291b56a2d8aSVineeth Remanan Pillai 			break;
1292b56a2d8aSVineeth Remanan Pillai 
1293b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial) {
1294b56a2d8aSVineeth Remanan Pillai 			*fs_pages_to_unuse -= ret;
1295b56a2d8aSVineeth Remanan Pillai 			if (*fs_pages_to_unuse == 0) {
1296b56a2d8aSVineeth Remanan Pillai 				ret = FRONTSWAP_PAGES_UNUSED;
1297b56a2d8aSVineeth Remanan Pillai 				break;
1298b56a2d8aSVineeth Remanan Pillai 			}
1299b56a2d8aSVineeth Remanan Pillai 		}
1300b56a2d8aSVineeth Remanan Pillai 
1301b56a2d8aSVineeth Remanan Pillai 		start = indices[pvec.nr - 1];
1302b56a2d8aSVineeth Remanan Pillai 	} while (true);
1303b56a2d8aSVineeth Remanan Pillai 
1304b56a2d8aSVineeth Remanan Pillai 	return ret;
1305b56a2d8aSVineeth Remanan Pillai }
1306b56a2d8aSVineeth Remanan Pillai 
1307b56a2d8aSVineeth Remanan Pillai /*
1308b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1309b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1310b56a2d8aSVineeth Remanan Pillai  * unused.
1311b56a2d8aSVineeth Remanan Pillai  */
1312b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
1313b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
1314b56a2d8aSVineeth Remanan Pillai {
1315b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1316b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1317b56a2d8aSVineeth Remanan Pillai 
1318b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1319b56a2d8aSVineeth Remanan Pillai 		return 0;
1320b56a2d8aSVineeth Remanan Pillai 
1321b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1322b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1323b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1324b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1325b56a2d8aSVineeth Remanan Pillai 			continue;
1326b56a2d8aSVineeth Remanan Pillai 		}
1327af53d3e9SHugh Dickins 		/*
1328af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1329af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1330af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1331af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1332af53d3e9SHugh Dickins 		 */
1333af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1334b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1335b56a2d8aSVineeth Remanan Pillai 
1336af53d3e9SHugh Dickins 		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1337b56a2d8aSVineeth Remanan Pillai 					  fs_pages_to_unuse);
1338b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1339b56a2d8aSVineeth Remanan Pillai 
1340b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1341b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1342b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1343b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1344af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1345af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1346b56a2d8aSVineeth Remanan Pillai 		if (error)
1347b56a2d8aSVineeth Remanan Pillai 			break;
13481da177e4SLinus Torvalds 	}
1349cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1350778dd893SHugh Dickins 
1351778dd893SHugh Dickins 	return error;
13521da177e4SLinus Torvalds }
13531da177e4SLinus Torvalds 
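/*
 * Editor's annotation — context, not part of mm/shmem.c. shmem_unuse()
 * is called from the swapoff(2) path (try_to_unuse() in mm/swapfile.c),
 * so a plain
 *
 *	swapoff /dev/sdb2
 *
 * ends up walking shmem_swaplist as above, swapping every tmpfs page on
 * that device back into the page cache before the device is released.
 */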
13541da177e4SLinus Torvalds /*
13551da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13561da177e4SLinus Torvalds  */
13571da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13581da177e4SLinus Torvalds {
13591da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13601da177e4SLinus Torvalds 	struct address_space *mapping;
13611da177e4SLinus Torvalds 	struct inode *inode;
13626922c0c7SHugh Dickins 	swp_entry_t swap;
13636922c0c7SHugh Dickins 	pgoff_t index;
13641da177e4SLinus Torvalds 
1365800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
13661da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13671da177e4SLinus Torvalds 	mapping = page->mapping;
13681da177e4SLinus Torvalds 	index = page->index;
13691da177e4SLinus Torvalds 	inode = mapping->host;
13701da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13711da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13721da177e4SLinus Torvalds 		goto redirty;
1373d9fe526aSHugh Dickins 	if (!total_swap_pages)
13741da177e4SLinus Torvalds 		goto redirty;
13751da177e4SLinus Torvalds 
1376d9fe526aSHugh Dickins 	/*
137797b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
137897b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
137997b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
138097b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
138197b713baSChristoph Hellwig 	 * threads or sync.
1382d9fe526aSHugh Dickins 	 */
138348f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
138448f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
138548f170fbSHugh Dickins 		goto redirty;
138648f170fbSHugh Dickins 	}
13871635f6a7SHugh Dickins 
13881635f6a7SHugh Dickins 	/*
13891635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13901635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13911635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13921aac1400SHugh Dickins 	 *
13931aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13941aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13951aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13961aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13971aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13981635f6a7SHugh Dickins 	 */
13991635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
14001aac1400SHugh Dickins 		if (inode->i_private) {
14011aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
14021aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
14031aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
14041aac1400SHugh Dickins 			if (shmem_falloc &&
14058e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
14061aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
14071aac1400SHugh Dickins 			    index < shmem_falloc->next)
14081aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
14091aac1400SHugh Dickins 			else
14101aac1400SHugh Dickins 				shmem_falloc = NULL;
14111aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
14121aac1400SHugh Dickins 			if (shmem_falloc)
14131aac1400SHugh Dickins 				goto redirty;
14141aac1400SHugh Dickins 		}
14151635f6a7SHugh Dickins 		clear_highpage(page);
14161635f6a7SHugh Dickins 		flush_dcache_page(page);
14171635f6a7SHugh Dickins 		SetPageUptodate(page);
14181635f6a7SHugh Dickins 	}
14191635f6a7SHugh Dickins 
142038d8b4e6SHuang Ying 	swap = get_swap_page(page);
142148f170fbSHugh Dickins 	if (!swap.val)
142248f170fbSHugh Dickins 		goto redirty;
1423d9fe526aSHugh Dickins 
1424b1dea800SHugh Dickins 	/*
1425b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
14266922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
14276922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1428b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
14296922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
14306922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1431b1dea800SHugh Dickins 	 */
1432b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
143305bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1434b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1435b1dea800SHugh Dickins 
14364afab1cdSYang Shi 	if (add_to_swap_cache(page, swap,
14373852f676SJoonsoo Kim 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
14383852f676SJoonsoo Kim 			NULL) == 0) {
14394595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1440267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1441267a4c76SHugh Dickins 		info->swapped++;
14424595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1443267a4c76SHugh Dickins 
1444aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14456922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
14466922c0c7SHugh Dickins 
14476922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1448d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
14499fab5619SHugh Dickins 		swap_writepage(page, wbc);
14501da177e4SLinus Torvalds 		return 0;
14511da177e4SLinus Torvalds 	}
14521da177e4SLinus Torvalds 
14536922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
145475f6d6d2SMinchan Kim 	put_swap_page(page, swap);
14551da177e4SLinus Torvalds redirty:
14561da177e4SLinus Torvalds 	set_page_dirty(page);
1457d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1458d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1459d9fe526aSHugh Dickins 	unlock_page(page);
1460d9fe526aSHugh Dickins 	return 0;
14611da177e4SLinus Torvalds }
14621da177e4SLinus Torvalds 
146375edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
146471fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1465680d794bSakpm@linux-foundation.org {
1466680d794bSakpm@linux-foundation.org 	char buffer[64];
1467680d794bSakpm@linux-foundation.org 
146871fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1469095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1470095f1fc4SLee Schermerhorn 
1471a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1472095f1fc4SLee Schermerhorn 
1473095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1474680d794bSakpm@linux-foundation.org }
147571fe804bSLee Schermerhorn 
147671fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
147771fe804bSLee Schermerhorn {
147871fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
147971fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
148071fe804bSLee Schermerhorn 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
148171fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
148271fe804bSLee Schermerhorn 		mpol_get(mpol);
148371fe804bSLee Schermerhorn 		spin_unlock(&sbinfo->stat_lock);
148471fe804bSLee Schermerhorn 	}
148571fe804bSLee Schermerhorn 	return mpol;
148671fe804bSLee Schermerhorn }
148775edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
148875edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
148975edd345SHugh Dickins {
149075edd345SHugh Dickins }
149175edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
149275edd345SHugh Dickins {
149375edd345SHugh Dickins 	return NULL;
149475edd345SHugh Dickins }
149575edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
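/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * shmem_show_mpol() backs tmpfs's show_options, so a mount like
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * is expected to appear in /proc/mounts with a ",mpol=interleave:0-3"
 * suffix, formatted by mpol_to_str() above.
 */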
149675edd345SHugh Dickins #ifndef CONFIG_NUMA
149775edd345SHugh Dickins #define vm_policy vm_private_data
149875edd345SHugh Dickins #endif
1499680d794bSakpm@linux-foundation.org 
1500800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1501800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1502800d8c63SKirill A. Shutemov {
1503800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
15042c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1505800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1506800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1507800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1508800d8c63SKirill A. Shutemov }
1509800d8c63SKirill A. Shutemov 
1510800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1511800d8c63SKirill A. Shutemov {
1512800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1513800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1514800d8c63SKirill A. Shutemov }
1515800d8c63SKirill A. Shutemov 
151641ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
151741ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
15181da177e4SLinus Torvalds {
15191da177e4SLinus Torvalds 	struct vm_area_struct pvma;
152018a2f371SMel Gorman 	struct page *page;
1521e9e9b7ecSMinchan Kim 	struct vm_fault vmf;
15221da177e4SLinus Torvalds 
1523800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1524e9e9b7ecSMinchan Kim 	vmf.vma = &pvma;
1525e9e9b7ecSMinchan Kim 	vmf.address = 0;
1526e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1527800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
152818a2f371SMel Gorman 
1529800d8c63SKirill A. Shutemov 	return page;
1530800d8c63SKirill A. Shutemov }
153118a2f371SMel Gorman 
1532800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1533800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1534800d8c63SKirill A. Shutemov {
1535800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15367b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15377b8d046fSMatthew Wilcox 	pgoff_t hindex;
1538800d8c63SKirill A. Shutemov 	struct page *page;
1539800d8c63SKirill A. Shutemov 
15404620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15417b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15427b8d046fSMatthew Wilcox 								XA_PRESENT))
1543800d8c63SKirill A. Shutemov 		return NULL;
1544800d8c63SKirill A. Shutemov 
1545800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1546800d8c63SKirill A. Shutemov 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
154719deb769SDavid Rientjes 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1548800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1549800d8c63SKirill A. Shutemov 	if (page)
1550800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
1551dcdf11eeSDavid Rientjes 	else
1552dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
155318a2f371SMel Gorman 	return page;
155418a2f371SMel Gorman }
155518a2f371SMel Gorman 
155618a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
155718a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
155818a2f371SMel Gorman {
155918a2f371SMel Gorman 	struct vm_area_struct pvma;
156018a2f371SMel Gorman 	struct page *page;
156118a2f371SMel Gorman 
1562800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1563800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1564800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
156518a2f371SMel Gorman 
1566800d8c63SKirill A. Shutemov 	return page;
1567800d8c63SKirill A. Shutemov }
1568800d8c63SKirill A. Shutemov 
1569800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
15700f079694SMike Rapoport 		struct inode *inode,
1571800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1572800d8c63SKirill A. Shutemov {
15730f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1574800d8c63SKirill A. Shutemov 	struct page *page;
1575800d8c63SKirill A. Shutemov 	int nr;
1576800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1577800d8c63SKirill A. Shutemov 
1578396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1579800d8c63SKirill A. Shutemov 		huge = false;
1580800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1581800d8c63SKirill A. Shutemov 
15820f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1583800d8c63SKirill A. Shutemov 		goto failed;
1584800d8c63SKirill A. Shutemov 
1585800d8c63SKirill A. Shutemov 	if (huge)
1586800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1587800d8c63SKirill A. Shutemov 	else
1588800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
158975edd345SHugh Dickins 	if (page) {
159075edd345SHugh Dickins 		__SetPageLocked(page);
159175edd345SHugh Dickins 		__SetPageSwapBacked(page);
1592800d8c63SKirill A. Shutemov 		return page;
159375edd345SHugh Dickins 	}
159418a2f371SMel Gorman 
1595800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15960f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1597800d8c63SKirill A. Shutemov failed:
1598800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15991da177e4SLinus Torvalds }
160071fe804bSLee Schermerhorn 
16011da177e4SLinus Torvalds /*
1602bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1603bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1604bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1605bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1606bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1607bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1608bde05d1cSHugh Dickins  *
1609bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1610bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1611bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1612bde05d1cSHugh Dickins  */
1613bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1614bde05d1cSHugh Dickins {
1615bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1616bde05d1cSHugh Dickins }
1617bde05d1cSHugh Dickins 
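/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * A driver with such a zone constraint would typically narrow its
 * mapping's gfp mask; a hypothetical call site:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL | __GFP_DMA32);
 *
 * A page swapped in above 4GB then fails the
 * page_zonenum(page) > gfp_zone(gfp) test above and is copied into a
 * conforming page by shmem_replace_page() below.
 */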
1618bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1619bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1620bde05d1cSHugh Dickins {
1621bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1622bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1623c1cb20d4SYu Zhao 	swp_entry_t entry;
1624bde05d1cSHugh Dickins 	pgoff_t swap_index;
1625bde05d1cSHugh Dickins 	int error;
1626bde05d1cSHugh Dickins 
1627bde05d1cSHugh Dickins 	oldpage = *pagep;
1628c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1629c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1630bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1631bde05d1cSHugh Dickins 
1632bde05d1cSHugh Dickins 	/*
1633bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1634bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1635bde05d1cSHugh Dickins 	 */
1636bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1637bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1638bde05d1cSHugh Dickins 	if (!newpage)
1639bde05d1cSHugh Dickins 		return -ENOMEM;
1640bde05d1cSHugh Dickins 
164109cbfeafSKirill A. Shutemov 	get_page(newpage);
1642bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
16430142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1644bde05d1cSHugh Dickins 
16459956edf3SHugh Dickins 	__SetPageLocked(newpage);
16469956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1647bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1648c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1649bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1650bde05d1cSHugh Dickins 
1651bde05d1cSHugh Dickins 	/*
1652bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1653bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1654bde05d1cSHugh Dickins 	 */
1655b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
165662f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
16570142ef6cSHugh Dickins 	if (!error) {
16580d1c2072SJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
16590d1c2072SJohannes Weiner 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
16600d1c2072SJohannes Weiner 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
16610142ef6cSHugh Dickins 	}
1662b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1663bde05d1cSHugh Dickins 
16640142ef6cSHugh Dickins 	if (unlikely(error)) {
16650142ef6cSHugh Dickins 		/*
16660142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16670142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16680142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16690142ef6cSHugh Dickins 		 */
16700142ef6cSHugh Dickins 		oldpage = newpage;
16710142ef6cSHugh Dickins 	} else {
16726058eaecSJohannes Weiner 		lru_cache_add(newpage);
16730142ef6cSHugh Dickins 		*pagep = newpage;
16740142ef6cSHugh Dickins 	}
1675bde05d1cSHugh Dickins 
1676bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1677bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1678bde05d1cSHugh Dickins 
1679bde05d1cSHugh Dickins 	unlock_page(oldpage);
168009cbfeafSKirill A. Shutemov 	put_page(oldpage);
168109cbfeafSKirill A. Shutemov 	put_page(oldpage);
16820142ef6cSHugh Dickins 	return error;
1683bde05d1cSHugh Dickins }
1684bde05d1cSHugh Dickins 
1685bde05d1cSHugh Dickins /*
1686c5bf121eSVineeth Remanan Pillai  * Swap in the page pointed to by *pagep.
1687c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *pagep contains a valid swapped page.
1688c5bf121eSVineeth Remanan Pillai  * Returns 0, with the page installed in *pagep, on success. On failure,
1689af44c12fSRandy Dunlap  * returns the error code, with NULL in *pagep.
16901da177e4SLinus Torvalds  */
1691c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1692c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
1693c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
16942b740303SSouptick Joarder 			     vm_fault_t *fault_type)
16951da177e4SLinus Torvalds {
16961da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
169723f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1698c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
169927ab7006SHugh Dickins 	struct page *page;
17001da177e4SLinus Torvalds 	swp_entry_t swap;
17011da177e4SLinus Torvalds 	int error;
17021da177e4SLinus Torvalds 
1703c5bf121eSVineeth Remanan Pillai 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1704c5bf121eSVineeth Remanan Pillai 	swap = radix_to_swp_entry(*pagep);
1705c5bf121eSVineeth Remanan Pillai 	*pagep = NULL;
170654af6042SHugh Dickins 
17071da177e4SLinus Torvalds 	/* Look it up and read it in.. */
1708ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
170927ab7006SHugh Dickins 	if (!page) {
17109e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
17119e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
171268da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17139e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17142262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17159e18eb29SAndres Lagar-Cavilla 		}
17169e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the I/O */
171741ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
171827ab7006SHugh Dickins 		if (!page) {
17191da177e4SLinus Torvalds 			error = -ENOMEM;
172054af6042SHugh Dickins 			goto failed;
1721285b2c4fSHugh Dickins 		}
17221da177e4SLinus Torvalds 	}
17231da177e4SLinus Torvalds 
17241da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
172554af6042SHugh Dickins 	lock_page(page);
17260142ef6cSHugh Dickins 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1727d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1728c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1729d1899228SHugh Dickins 		goto unlock;
1730bde05d1cSHugh Dickins 	}
173127ab7006SHugh Dickins 	if (!PageUptodate(page)) {
17321da177e4SLinus Torvalds 		error = -EIO;
173354af6042SHugh Dickins 		goto failed;
173454af6042SHugh Dickins 	}
173554af6042SHugh Dickins 	wait_on_page_writeback(page);
173654af6042SHugh Dickins 
1737bde05d1cSHugh Dickins 	if (shmem_should_replace_page(page, gfp)) {
1738bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1739bde05d1cSHugh Dickins 		if (error)
174054af6042SHugh Dickins 			goto failed;
17411da177e4SLinus Torvalds 	}
17421da177e4SLinus Torvalds 
17433fea5a49SJohannes Weiner 	error = shmem_add_to_page_cache(page, mapping, index,
17443fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17453fea5a49SJohannes Weiner 					charge_mm);
174654af6042SHugh Dickins 	if (error)
174754af6042SHugh Dickins 		goto failed;
174854af6042SHugh Dickins 
17494595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
175054af6042SHugh Dickins 	info->swapped--;
175154af6042SHugh Dickins 	shmem_recalc_inode(inode);
17524595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
175327ab7006SHugh Dickins 
175466d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
175566d2f4d2SHugh Dickins 		mark_page_accessed(page);
175666d2f4d2SHugh Dickins 
175727ab7006SHugh Dickins 	delete_from_swap_cache(page);
175827ab7006SHugh Dickins 	set_page_dirty(page);
175927ab7006SHugh Dickins 	swap_free(swap);
176027ab7006SHugh Dickins 
1761c5bf121eSVineeth Remanan Pillai 	*pagep = page;
1762c5bf121eSVineeth Remanan Pillai 	return 0;
1763c5bf121eSVineeth Remanan Pillai failed:
1764c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1765c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1766c5bf121eSVineeth Remanan Pillai unlock:
1767c5bf121eSVineeth Remanan Pillai 	if (page) {
1768c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1769c5bf121eSVineeth Remanan Pillai 		put_page(page);
1770c5bf121eSVineeth Remanan Pillai 	}
1771c5bf121eSVineeth Remanan Pillai 
1772c5bf121eSVineeth Remanan Pillai 	return error;
1773c5bf121eSVineeth Remanan Pillai }
1774c5bf121eSVineeth Remanan Pillai 
1775c5bf121eSVineeth Remanan Pillai /*
1776c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1777c5bf121eSVineeth Remanan Pillai  *
1778c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1779c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty, since we also free the swap
1780c5bf121eSVineeth Remanan Pillai  * entry: a page cannot live in both the swap and page cache.
1781c5bf121eSVineeth Remanan Pillai  *
178228eb3c80SMiles Chen  * vmf and fault_type are only supplied by shmem_fault:
1783c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1784c5bf121eSVineeth Remanan Pillai  */
1785c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1786c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1787c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1788c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1789c5bf121eSVineeth Remanan Pillai {
1790c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1791c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1792c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1793c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1794c5bf121eSVineeth Remanan Pillai 	struct page *page;
1795c5bf121eSVineeth Remanan Pillai 	enum sgp_type sgp_huge = sgp;
1796c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1797c5bf121eSVineeth Remanan Pillai 	int error;
1798c5bf121eSVineeth Remanan Pillai 	int once = 0;
1799c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1800c5bf121eSVineeth Remanan Pillai 
1801c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1802c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1803c5bf121eSVineeth Remanan Pillai 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1804c5bf121eSVineeth Remanan Pillai 		sgp = SGP_CACHE;
1805c5bf121eSVineeth Remanan Pillai repeat:
1806c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1807c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1808c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1809c5bf121eSVineeth Remanan Pillai 	}
1810c5bf121eSVineeth Remanan Pillai 
1811c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
1812c5bf121eSVineeth Remanan Pillai 	charge_mm = vma ? vma->vm_mm : current->mm;
1813c5bf121eSVineeth Remanan Pillai 
1814c5bf121eSVineeth Remanan Pillai 	page = find_lock_entry(mapping, index);
1815c5bf121eSVineeth Remanan Pillai 	if (xa_is_value(page)) {
1816c5bf121eSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, index, &page,
1817c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1818c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1819c5bf121eSVineeth Remanan Pillai 			goto repeat;
1820c5bf121eSVineeth Remanan Pillai 
1821c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1822c5bf121eSVineeth Remanan Pillai 		return error;
1823c5bf121eSVineeth Remanan Pillai 	}
1824c5bf121eSVineeth Remanan Pillai 
1825c5bf121eSVineeth Remanan Pillai 	if (page && sgp == SGP_WRITE)
1826c5bf121eSVineeth Remanan Pillai 		mark_page_accessed(page);
1827c5bf121eSVineeth Remanan Pillai 
1828c5bf121eSVineeth Remanan Pillai 	/* fallocated page? */
1829c5bf121eSVineeth Remanan Pillai 	if (page && !PageUptodate(page)) {
1830c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1831c5bf121eSVineeth Remanan Pillai 			goto clear;
1832c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1833c5bf121eSVineeth Remanan Pillai 		put_page(page);
1834c5bf121eSVineeth Remanan Pillai 		page = NULL;
1835c5bf121eSVineeth Remanan Pillai 	}
1836c5bf121eSVineeth Remanan Pillai 	if (page || sgp == SGP_READ) {
1837c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1838c5bf121eSVineeth Remanan Pillai 		return 0;
1839c5bf121eSVineeth Remanan Pillai 	}
1840c5bf121eSVineeth Remanan Pillai 
1841c5bf121eSVineeth Remanan Pillai 	/*
1842c5bf121eSVineeth Remanan Pillai 	 * Fast cache lookup did not find it:
1843c5bf121eSVineeth Remanan Pillai 	 * bring it back from swap or allocate.
1844c5bf121eSVineeth Remanan Pillai 	 */
1845c5bf121eSVineeth Remanan Pillai 
1846cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1847cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1848cfda0526SMike Rapoport 		return 0;
1849cfda0526SMike Rapoport 	}
1850cfda0526SMike Rapoport 
1851800d8c63SKirill A. Shutemov 	/* shmem_symlink() */
1852800d8c63SKirill A. Shutemov 	if (mapping->a_ops != &shmem_aops)
1853800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1854657e3038SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1855800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1856800d8c63SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
1857800d8c63SKirill A. Shutemov 		goto alloc_huge;
1858800d8c63SKirill A. Shutemov 	switch (sbinfo->huge) {
1859800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
1860800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
186127d80fa2SKees Cook 	case SHMEM_HUGE_WITHIN_SIZE: {
186227d80fa2SKees Cook 		loff_t i_size;
186327d80fa2SKees Cook 		pgoff_t off;
186427d80fa2SKees Cook 
1865800d8c63SKirill A. Shutemov 		off = round_up(index, HPAGE_PMD_NR);
1866800d8c63SKirill A. Shutemov 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1867800d8c63SKirill A. Shutemov 		if (i_size >= HPAGE_PMD_SIZE &&
1868800d8c63SKirill A. Shutemov 		    i_size >> PAGE_SHIFT >= off)
1869800d8c63SKirill A. Shutemov 			goto alloc_huge;
187027d80fa2SKees Cook 
187127d80fa2SKees Cook 		fallthrough;
187227d80fa2SKees Cook 	}
1873800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
1874657e3038SKirill A. Shutemov 		if (sgp_huge == SGP_HUGE)
1875657e3038SKirill A. Shutemov 			goto alloc_huge;
1876657e3038SKirill A. Shutemov 		/* TODO: implement fadvise() hints */
1877800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
187859a16eadSHugh Dickins 	}
18791da177e4SLinus Torvalds 
1880800d8c63SKirill A. Shutemov alloc_huge:
18810f079694SMike Rapoport 	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1882800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1883c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1884c5bf121eSVineeth Remanan Pillai 		page = shmem_alloc_and_acct_page(gfp, inode,
1885800d8c63SKirill A. Shutemov 						 index, false);
188654af6042SHugh Dickins 	}
1887800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1888779750d2SKirill A. Shutemov 		int retry = 5;
1889c5bf121eSVineeth Remanan Pillai 
1890800d8c63SKirill A. Shutemov 		error = PTR_ERR(page);
1891800d8c63SKirill A. Shutemov 		page = NULL;
1892779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1893c5bf121eSVineeth Remanan Pillai 			goto unlock;
1894779750d2SKirill A. Shutemov 		/*
1895c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1896779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1897779750d2SKirill A. Shutemov 		 */
1898779750d2SKirill A. Shutemov 		while (retry--) {
1899779750d2SKirill A. Shutemov 			int ret;
1900c5bf121eSVineeth Remanan Pillai 
1901779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1902779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1903779750d2SKirill A. Shutemov 				break;
1904779750d2SKirill A. Shutemov 			if (ret)
1905779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1906779750d2SKirill A. Shutemov 		}
1907c5bf121eSVineeth Remanan Pillai 		goto unlock;
1908800d8c63SKirill A. Shutemov 	}
1909800d8c63SKirill A. Shutemov 
1910800d8c63SKirill A. Shutemov 	if (PageTransHuge(page))
1911800d8c63SKirill A. Shutemov 		hindex = round_down(index, HPAGE_PMD_NR);
1912800d8c63SKirill A. Shutemov 	else
1913800d8c63SKirill A. Shutemov 		hindex = index;
1914800d8c63SKirill A. Shutemov 
191566d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1916eb39d618SHugh Dickins 		__SetPageReferenced(page);
191766d2f4d2SHugh Dickins 
1918800d8c63SKirill A. Shutemov 	error = shmem_add_to_page_cache(page, mapping, hindex,
19193fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19203fea5a49SJohannes Weiner 					charge_mm);
19213fea5a49SJohannes Weiner 	if (error)
1922800d8c63SKirill A. Shutemov 		goto unacct;
19236058eaecSJohannes Weiner 	lru_cache_add(page);
192454af6042SHugh Dickins 
19254595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1926d8c6546bSMatthew Wilcox (Oracle) 	info->alloced += compound_nr(page);
1927800d8c63SKirill A. Shutemov 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
192854af6042SHugh Dickins 	shmem_recalc_inode(inode);
19294595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19301635f6a7SHugh Dickins 	alloced = true;
193154af6042SHugh Dickins 
1932779750d2SKirill A. Shutemov 	if (PageTransHuge(page) &&
1933779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1934779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1935779750d2SKirill A. Shutemov 		/*
1936779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1937779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1938779750d2SKirill A. Shutemov 		 */
1939779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1940d041353dSCong Wang 		/*
1941d041353dSCong Wang 		 * list_empty_careful() to defend against unlocked
1942d041353dSCong Wang 		 * access to ->shrink_list in shmem_unused_huge_shrink()
1943d041353dSCong Wang 		 */
1944d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1945779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1946779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1947779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1948779750d2SKirill A. Shutemov 		}
1949779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1950779750d2SKirill A. Shutemov 	}
1951779750d2SKirill A. Shutemov 
1952ec9516fbSHugh Dickins 	/*
19531635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19541635f6a7SHugh Dickins 	 */
19551635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19561635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19571635f6a7SHugh Dickins clear:
19581635f6a7SHugh Dickins 	/*
19591635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19601635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19611635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1962ec9516fbSHugh Dickins 	 */
1963800d8c63SKirill A. Shutemov 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1964800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
1965800d8c63SKirill A. Shutemov 		int i;
1966800d8c63SKirill A. Shutemov 
1967d8c6546bSMatthew Wilcox (Oracle) 		for (i = 0; i < compound_nr(head); i++) {
1968800d8c63SKirill A. Shutemov 			clear_highpage(head + i);
1969800d8c63SKirill A. Shutemov 			flush_dcache_page(head + i);
1970800d8c63SKirill A. Shutemov 		}
1971800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
1972ec9516fbSHugh Dickins 	}
1973bde05d1cSHugh Dickins 
197454af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
197575edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
197609cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1977267a4c76SHugh Dickins 		if (alloced) {
1978267a4c76SHugh Dickins 			ClearPageDirty(page);
1979267a4c76SHugh Dickins 			delete_from_page_cache(page);
19804595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1981267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
19824595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1983267a4c76SHugh Dickins 		}
198454af6042SHugh Dickins 		error = -EINVAL;
1985267a4c76SHugh Dickins 		goto unlock;
1986ff36b801SShaohua Li 	}
1987800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
198854af6042SHugh Dickins 	return 0;
1989d00806b1SNick Piggin 
1990d0217ac0SNick Piggin 	/*
199154af6042SHugh Dickins 	 * Error recovery.
19921da177e4SLinus Torvalds 	 */
199354af6042SHugh Dickins unacct:
1994d8c6546bSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, compound_nr(page));
1995800d8c63SKirill A. Shutemov 
1996800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
1997800d8c63SKirill A. Shutemov 		unlock_page(page);
1998800d8c63SKirill A. Shutemov 		put_page(page);
1999800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
2000800d8c63SKirill A. Shutemov 	}
2001d1899228SHugh Dickins unlock:
200227ab7006SHugh Dickins 	if (page) {
200354af6042SHugh Dickins 		unlock_page(page);
200409cbfeafSKirill A. Shutemov 		put_page(page);
200554af6042SHugh Dickins 	}
200654af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
20074595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
200854af6042SHugh Dickins 		shmem_recalc_inode(inode);
20094595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
20101da177e4SLinus Torvalds 		goto repeat;
2011d8dc74f2SAdrian Bunk 	}
20127f4446eeSMatthew Wilcox 	if (error == -EEXIST)
201354af6042SHugh Dickins 		goto repeat;
201454af6042SHugh Dickins 	return error;
20151da177e4SLinus Torvalds }
20161da177e4SLinus Torvalds 
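/*
 * Editor's annotation — an illustrative sketch, not part of mm/shmem.c.
 * Other kernel code reaches shmem_getpage_gfp() through wrappers such
 * as shmem_read_mapping_page(), which looks the page up with SGP_CACHE
 * and returns it with a reference held:
 *
 *	struct page *page = shmem_read_mapping_page(mapping, index);
 *	if (!IS_ERR(page)) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */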
201710d20bd2SLinus Torvalds /*
201810d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
201910d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
202010d20bd2SLinus Torvalds  * target.
202110d20bd2SLinus Torvalds  */
2022ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
202310d20bd2SLinus Torvalds {
202410d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20252055da97SIngo Molnar 	list_del_init(&wait->entry);
202610d20bd2SLinus Torvalds 	return ret;
202710d20bd2SLinus Torvalds }
202810d20bd2SLinus Torvalds 
202920acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20301da177e4SLinus Torvalds {
203111bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2032496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20339e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2034657e3038SKirill A. Shutemov 	enum sgp_type sgp;
203520acce67SSouptick Joarder 	int err;
203620acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20371da177e4SLinus Torvalds 
2038f00cdc6dSHugh Dickins 	/*
2039f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2040f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
2041f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
20428e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20438e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20448e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20458e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20468e205f77SHugh Dickins 	 *
20478e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20488e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20498e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20508e205f77SHugh Dickins 	 *
20518e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20528e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
20538e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2054f00cdc6dSHugh Dickins 	 */
2055f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2056f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2057f00cdc6dSHugh Dickins 
2058f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2059f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20608e205f77SHugh Dickins 		if (shmem_falloc &&
20618e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20628e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20638e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20648897c1b1SKirill A. Shutemov 			struct file *fpin;
20658e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
206610d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20678e205f77SHugh Dickins 
20688e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20698897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20708897c1b1SKirill A. Shutemov 			if (fpin)
20718e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20728e205f77SHugh Dickins 
20738e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20748e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20758e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20768e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20778e205f77SHugh Dickins 			schedule();
20788e205f77SHugh Dickins 
20798e205f77SHugh Dickins 			/*
20808e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20818e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
20828e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
20838e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
20848e205f77SHugh Dickins 			 * though i_lock is needed lest we race with wake_up_all().
20858e205f77SHugh Dickins 			 */
20868e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
20878e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
20888e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20898897c1b1SKirill A. Shutemov 
20908897c1b1SKirill A. Shutemov 			if (fpin)
20918897c1b1SKirill A. Shutemov 				fput(fpin);
20928e205f77SHugh Dickins 			return ret;
2093f00cdc6dSHugh Dickins 		}
20948e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2095f00cdc6dSHugh Dickins 	}
2096f00cdc6dSHugh Dickins 
2097657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
209818600332SMichal Hocko 
209918600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
210018600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2101657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
210218600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
210318600332SMichal Hocko 		sgp = SGP_HUGE;
2104657e3038SKirill A. Shutemov 
210520acce67SSouptick Joarder 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2106cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
210720acce67SSouptick Joarder 	if (err)
210820acce67SSouptick Joarder 		return vmf_error(err);
210968da9f05SHugh Dickins 	return ret;
21101da177e4SLinus Torvalds }
21111da177e4SLinus Torvalds 
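/*
 * The interlock above can be exercised from userspace: one thread punches
 * a hole while another keeps faulting the same range.  A minimal sketch
 * (hypothetical path, error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/punch-demo", O_RDWR | O_CREAT, 0600);
	char *p;

	ftruncate(fd, 1 << 20);
	p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/*
	 * If another thread is concurrently running
	 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	 *	       0, 1 << 20);
	 * this write faults into the hole: shmem_fault() parks on
	 * shmem_falloc_waitq rather than refilling pages and livelocking
	 * the punch.
	 */
	p[4096] = 1;
	return 0;
}
#endif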
2112c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2113c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2114c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2115c01d5b30SHugh Dickins {
2116c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2117c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2118c01d5b30SHugh Dickins 	unsigned long addr;
2119c01d5b30SHugh Dickins 	unsigned long offset;
2120c01d5b30SHugh Dickins 	unsigned long inflated_len;
2121c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2122c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2123c01d5b30SHugh Dickins 
2124c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2125c01d5b30SHugh Dickins 		return -ENOMEM;
2126c01d5b30SHugh Dickins 
2127c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2128c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2129c01d5b30SHugh Dickins 
2130396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2131c01d5b30SHugh Dickins 		return addr;
2132c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2133c01d5b30SHugh Dickins 		return addr;
2134c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2135c01d5b30SHugh Dickins 		return addr;
2136c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2137c01d5b30SHugh Dickins 		return addr;
2138c01d5b30SHugh Dickins 
2139c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2140c01d5b30SHugh Dickins 		return addr;
2141c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2142c01d5b30SHugh Dickins 		return addr;
2143c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2144c01d5b30SHugh Dickins 		return addr;
2145c01d5b30SHugh Dickins 	/*
2146c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely,
2147c01d5b30SHugh Dickins 	 * and to support MAP_PRIVATE mapped hugely too, until it is COWed.
214899158997SKirill A. Shutemov 	 * But if caller specified an address hint and we allocated area there
214999158997SKirill A. Shutemov 	 * successfully, respect that as before.
2150c01d5b30SHugh Dickins 	 */
215199158997SKirill A. Shutemov 	if (uaddr == addr)
2152c01d5b30SHugh Dickins 		return addr;
2153c01d5b30SHugh Dickins 
2154c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2155c01d5b30SHugh Dickins 		struct super_block *sb;
2156c01d5b30SHugh Dickins 
2157c01d5b30SHugh Dickins 		if (file) {
2158c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2159c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2160c01d5b30SHugh Dickins 		} else {
2161c01d5b30SHugh Dickins 			/*
2162c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2163c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2164c01d5b30SHugh Dickins 			 */
2165c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2166c01d5b30SHugh Dickins 				return addr;
2167c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2168c01d5b30SHugh Dickins 		}
21693089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2170c01d5b30SHugh Dickins 			return addr;
2171c01d5b30SHugh Dickins 	}
2172c01d5b30SHugh Dickins 
2173c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2174c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2175c01d5b30SHugh Dickins 		return addr;
2176c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2177c01d5b30SHugh Dickins 		return addr;
2178c01d5b30SHugh Dickins 
2179c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2180c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2181c01d5b30SHugh Dickins 		return addr;
2182c01d5b30SHugh Dickins 	if (inflated_len < len)
2183c01d5b30SHugh Dickins 		return addr;
2184c01d5b30SHugh Dickins 
218599158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2186c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2187c01d5b30SHugh Dickins 		return addr;
2188c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2189c01d5b30SHugh Dickins 		return addr;
2190c01d5b30SHugh Dickins 
2191c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2192c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2193c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2194c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2195c01d5b30SHugh Dickins 
2196c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2197c01d5b30SHugh Dickins 		return addr;
2198c01d5b30SHugh Dickins 	return inflated_addr;
2199c01d5b30SHugh Dickins }
2200c01d5b30SHugh Dickins 
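/*
 * The effect is observable from userspace: with huge tmpfs enabled, a
 * large enough MAP_SHARED mapping comes back PMD-aligned.  A minimal
 * sketch (assumes glibc's memfd_create() wrapper, a huge=always mount,
 * and 2MB PMDs as on x86-64; error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4UL << 20;		/* comfortably above HPAGE_PMD_SIZE */
	int fd = memfd_create("huge-demo", 0);
	unsigned long addr;

	ftruncate(fd, len);
	addr = (unsigned long)mmap(NULL, len, PROT_READ | PROT_WRITE,
				   MAP_SHARED, fd, 0);
	/* The inflated search above typically makes this print 0: */
	printf("offset into 2MB unit: %lu\n", addr & ((2UL << 20) - 1));
	return 0;
}
#endif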
22011da177e4SLinus Torvalds #ifdef CONFIG_NUMA
220241ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
22031da177e4SLinus Torvalds {
2204496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
220541ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
22061da177e4SLinus Torvalds }
22071da177e4SLinus Torvalds 
2208d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2209d8dc74f2SAdrian Bunk 					  unsigned long addr)
22101da177e4SLinus Torvalds {
2211496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
221241ffe5d5SHugh Dickins 	pgoff_t index;
22131da177e4SLinus Torvalds 
221441ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
221541ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds #endif
22181da177e4SLinus Torvalds 
22191da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
22201da177e4SLinus Torvalds {
2221496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22221da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22231da177e4SLinus Torvalds 	int retval = -ENOMEM;
22241da177e4SLinus Torvalds 
2225ea0dfeb4SHugh Dickins 	/*
2226ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2227ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2228ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2229ea0dfeb4SHugh Dickins 	 */
22301da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
22311da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
22321da177e4SLinus Torvalds 			goto out_nomem;
22331da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
223489e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22351da177e4SLinus Torvalds 	}
22361da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
22371da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
22381da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
223989e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22401da177e4SLinus Torvalds 	}
22411da177e4SLinus Torvalds 	retval = 0;
224289e004eaSLee Schermerhorn 
22431da177e4SLinus Torvalds out_nomem:
22441da177e4SLinus Torvalds 	return retval;
22451da177e4SLinus Torvalds }
22461da177e4SLinus Torvalds 
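/*
 * The usual way into this function is SysV shared memory; a minimal
 * sketch of the two call sites named in the comment above (error
 * handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	shmctl(id, SHM_LOCK, NULL);	/* shmctl_do_lock() -> shmem_lock(file, 1, user) */
	shmctl(id, SHM_UNLOCK, NULL);	/* shmctl_do_lock() -> shmem_lock(file, 0, user) */
	shmctl(id, IPC_RMID, NULL);	/* shm_destroy() also unlocks */
	return 0;
}
#endif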
22479b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22481da177e4SLinus Torvalds {
2249ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2250ab3948f5SJoel Fernandes (Google) 
2251ab3948f5SJoel Fernandes (Google) 	if (info->seals & F_SEAL_FUTURE_WRITE) {
2252ab3948f5SJoel Fernandes (Google) 		/*
2253ab3948f5SJoel Fernandes (Google) 		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2254ab3948f5SJoel Fernandes (Google) 		 * "future write" seal active.
2255ab3948f5SJoel Fernandes (Google) 		 */
2256ab3948f5SJoel Fernandes (Google) 		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2257ab3948f5SJoel Fernandes (Google) 			return -EPERM;
2258ab3948f5SJoel Fernandes (Google) 
2259ab3948f5SJoel Fernandes (Google) 		/*
226005d35110SNicolas Geoffray 		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
226105d35110SNicolas Geoffray 		 * MAP_SHARED and read-only, take care to not allow mprotect to
226205d35110SNicolas Geoffray 		 * revert protections on such mappings. Do this only for shared
226305d35110SNicolas Geoffray 		 * mappings. For private mappings, we don't need to mask
226405d35110SNicolas Geoffray 		 * VM_MAYWRITE as we still want them to be COW-writable.
2265ab3948f5SJoel Fernandes (Google) 		 */
226605d35110SNicolas Geoffray 		if (vma->vm_flags & VM_SHARED)
2267ab3948f5SJoel Fernandes (Google) 			vma->vm_flags &= ~(VM_MAYWRITE);
2268ab3948f5SJoel Fernandes (Google) 	}
2269ab3948f5SJoel Fernandes (Google) 
2270*51b0bff2SCatalin Marinas 	/* arm64 - allow memory tagging on RAM-based files */
2271*51b0bff2SCatalin Marinas 	vma->vm_flags |= VM_MTE_ALLOWED;
2272*51b0bff2SCatalin Marinas 
22731da177e4SLinus Torvalds 	file_accessed(file);
22741da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2275396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2276f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2277f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2278f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2279f3f0e1d2SKirill A. Shutemov 	}
22801da177e4SLinus Torvalds 	return 0;
22811da177e4SLinus Torvalds }
22821da177e4SLinus Torvalds 
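/*
 * A minimal sketch of the F_SEAL_FUTURE_WRITE checks above, seen from
 * the memfd side (assumes a libc exposing F_ADD_SEALS and this seal;
 * error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
	/* Refused with EPERM above: shared and writable. */
	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* Allowed, but VM_MAYWRITE is masked, so a later
	 * mprotect(..., PROT_WRITE) cannot reopen the mapping: */
	mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	return 0;
}
#endif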
2283454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
228409208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22851da177e4SLinus Torvalds {
22861da177e4SLinus Torvalds 	struct inode *inode;
22871da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22881da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2289e809d5f0SChris Down 	ino_t ino;
22901da177e4SLinus Torvalds 
2291e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
22921da177e4SLinus Torvalds 		return NULL;
22931da177e4SLinus Torvalds 
22941da177e4SLinus Torvalds 	inode = new_inode(sb);
22951da177e4SLinus Torvalds 	if (inode) {
2296e809d5f0SChris Down 		inode->i_ino = ino;
2297454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
22981da177e4SLinus Torvalds 		inode->i_blocks = 0;
2299078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
230046c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
23011da177e4SLinus Torvalds 		info = SHMEM_I(inode);
23021da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
23031da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2304af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
230540e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
23060b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2307779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
23081da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
230938f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
231072c04902SAl Viro 		cache_no_acl(inode);
23111da177e4SLinus Torvalds 
23121da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
23131da177e4SLinus Torvalds 		default:
231439f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23151da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23161da177e4SLinus Torvalds 			break;
23171da177e4SLinus Torvalds 		case S_IFREG:
231814fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23191da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23201da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
232171fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
232271fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23231da177e4SLinus Torvalds 			break;
23241da177e4SLinus Torvalds 		case S_IFDIR:
2325d8c76e6fSDave Hansen 			inc_nlink(inode);
23261da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23271da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23281da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23291da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23301da177e4SLinus Torvalds 			break;
23311da177e4SLinus Torvalds 		case S_IFLNK:
23321da177e4SLinus Torvalds 			/*
23331da177e4SLinus Torvalds 			 * Must not load anything in the rbtree:
23341da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
23351da177e4SLinus Torvalds 			 */
233671fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23371da177e4SLinus Torvalds 			break;
23381da177e4SLinus Torvalds 		}
2339b45d71fbSJoel Fernandes (Google) 
2340b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23415b04c689SPavel Emelyanov 	} else
23425b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23431da177e4SLinus Torvalds 	return inode;
23441da177e4SLinus Torvalds }
23451da177e4SLinus Torvalds 
23460cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping)
23470cd6144aSJohannes Weiner {
2348f8005451SHugh Dickins 	return mapping->a_ops == &shmem_aops;
23490cd6144aSJohannes Weiner }
23500cd6144aSJohannes Weiner 
23518d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23524c27fe4cSMike Rapoport 				  pmd_t *dst_pmd,
23534c27fe4cSMike Rapoport 				  struct vm_area_struct *dst_vma,
23544c27fe4cSMike Rapoport 				  unsigned long dst_addr,
23554c27fe4cSMike Rapoport 				  unsigned long src_addr,
23568d103963SMike Rapoport 				  bool zeropage,
23574c27fe4cSMike Rapoport 				  struct page **pagep)
23584c27fe4cSMike Rapoport {
23594c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23604c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23614c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23624c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23634c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23644c27fe4cSMike Rapoport 	spinlock_t *ptl;
23654c27fe4cSMike Rapoport 	void *page_kaddr;
23664c27fe4cSMike Rapoport 	struct page *page;
23674c27fe4cSMike Rapoport 	pte_t _dst_pte, *dst_pte;
23684c27fe4cSMike Rapoport 	int ret;
2369e2a50c1fSAndrea Arcangeli 	pgoff_t offset, max_off;
23704c27fe4cSMike Rapoport 
23714c27fe4cSMike Rapoport 	ret = -ENOMEM;
23720f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, 1))
23734c27fe4cSMike Rapoport 		goto out;
23744c27fe4cSMike Rapoport 
2375cb658a45SAndrea Arcangeli 	if (!*pagep) {
23764c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
23774c27fe4cSMike Rapoport 		if (!page)
23780f079694SMike Rapoport 			goto out_unacct_blocks;
23794c27fe4cSMike Rapoport 
23808d103963SMike Rapoport 		if (!zeropage) {	/* mcopy_atomic */
23814c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23828d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23838d103963SMike Rapoport 					     (const void __user *)src_addr,
23844c27fe4cSMike Rapoport 					     PAGE_SIZE);
23854c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23864c27fe4cSMike Rapoport 
2387c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
23884c27fe4cSMike Rapoport 			if (unlikely(ret)) {
23894c27fe4cSMike Rapoport 				*pagep = page;
23900f079694SMike Rapoport 				shmem_inode_unacct_blocks(inode, 1);
23914c27fe4cSMike Rapoport 				/* don't free the page */
23929e368259SAndrea Arcangeli 				return -ENOENT;
23934c27fe4cSMike Rapoport 			}
23948d103963SMike Rapoport 		} else {		/* mfill_zeropage_atomic */
23958d103963SMike Rapoport 			clear_highpage(page);
23968d103963SMike Rapoport 		}
23974c27fe4cSMike Rapoport 	} else {
23984c27fe4cSMike Rapoport 		page = *pagep;
23994c27fe4cSMike Rapoport 		*pagep = NULL;
24004c27fe4cSMike Rapoport 	}
24014c27fe4cSMike Rapoport 
24029cc90c66SAndrea Arcangeli 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
24039cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
24049cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2405a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
24069cc90c66SAndrea Arcangeli 
2407e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2408e2a50c1fSAndrea Arcangeli 	offset = linear_page_index(dst_vma, dst_addr);
2409e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2410e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
2411e2a50c1fSAndrea Arcangeli 		goto out_release;
2412e2a50c1fSAndrea Arcangeli 
24133fea5a49SJohannes Weiner 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
24143fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24154c27fe4cSMike Rapoport 	if (ret)
24164c27fe4cSMike Rapoport 		goto out_release;
24174c27fe4cSMike Rapoport 
24184c27fe4cSMike Rapoport 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
24194c27fe4cSMike Rapoport 	if (dst_vma->vm_flags & VM_WRITE)
24204c27fe4cSMike Rapoport 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2421dcf7fe9dSAndrea Arcangeli 	else {
2422dcf7fe9dSAndrea Arcangeli 		/*
2423dcf7fe9dSAndrea Arcangeli 		 * We don't set the pte dirty if the vma has no
2424dcf7fe9dSAndrea Arcangeli 		 * VM_WRITE permission, so mark the page dirty or it
2425dcf7fe9dSAndrea Arcangeli 		 * could be freed from under us. We could do it
2426dcf7fe9dSAndrea Arcangeli 		 * unconditionally before unlock_page(), but doing it
2427dcf7fe9dSAndrea Arcangeli 		 * only if VM_WRITE is not set is faster.
2428dcf7fe9dSAndrea Arcangeli 		 */
2429dcf7fe9dSAndrea Arcangeli 		set_page_dirty(page);
2430dcf7fe9dSAndrea Arcangeli 	}
24314c27fe4cSMike Rapoport 
24324c27fe4cSMike Rapoport 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2433e2a50c1fSAndrea Arcangeli 
2434e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2435e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2436e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
24373fea5a49SJohannes Weiner 		goto out_release_unlock;
2438e2a50c1fSAndrea Arcangeli 
2439e2a50c1fSAndrea Arcangeli 	ret = -EEXIST;
24404c27fe4cSMike Rapoport 	if (!pte_none(*dst_pte))
24413fea5a49SJohannes Weiner 		goto out_release_unlock;
24424c27fe4cSMike Rapoport 
24436058eaecSJohannes Weiner 	lru_cache_add(page);
24444c27fe4cSMike Rapoport 
244594b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24464c27fe4cSMike Rapoport 	info->alloced++;
24474c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24484c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
244994b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24504c27fe4cSMike Rapoport 
24514c27fe4cSMike Rapoport 	inc_mm_counter(dst_mm, mm_counter_file(page));
24524c27fe4cSMike Rapoport 	page_add_file_rmap(page, false);
24534c27fe4cSMike Rapoport 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
24544c27fe4cSMike Rapoport 
24554c27fe4cSMike Rapoport 	/* No need to invalidate - it was non-present before */
24564c27fe4cSMike Rapoport 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
24574c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2458e2a50c1fSAndrea Arcangeli 	unlock_page(page);
24594c27fe4cSMike Rapoport 	ret = 0;
24604c27fe4cSMike Rapoport out:
24614c27fe4cSMike Rapoport 	return ret;
24623fea5a49SJohannes Weiner out_release_unlock:
24634c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2464dcf7fe9dSAndrea Arcangeli 	ClearPageDirty(page);
2465e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
24664c27fe4cSMike Rapoport out_release:
24679cc90c66SAndrea Arcangeli 	unlock_page(page);
24684c27fe4cSMike Rapoport 	put_page(page);
24694c27fe4cSMike Rapoport out_unacct_blocks:
24700f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24714c27fe4cSMike Rapoport 	goto out;
24724c27fe4cSMike Rapoport }
24734c27fe4cSMike Rapoport 
24748d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
24758d103963SMike Rapoport 			   pmd_t *dst_pmd,
24768d103963SMike Rapoport 			   struct vm_area_struct *dst_vma,
24778d103963SMike Rapoport 			   unsigned long dst_addr,
24788d103963SMike Rapoport 			   unsigned long src_addr,
24798d103963SMike Rapoport 			   struct page **pagep)
24808d103963SMike Rapoport {
24818d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24828d103963SMike Rapoport 				      dst_addr, src_addr, false, pagep);
24838d103963SMike Rapoport }
24848d103963SMike Rapoport 
24858d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
24868d103963SMike Rapoport 			     pmd_t *dst_pmd,
24878d103963SMike Rapoport 			     struct vm_area_struct *dst_vma,
24888d103963SMike Rapoport 			     unsigned long dst_addr)
24898d103963SMike Rapoport {
24908d103963SMike Rapoport 	struct page *page = NULL;
24918d103963SMike Rapoport 
24928d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24938d103963SMike Rapoport 				      dst_addr, 0, true, &page);
24948d103963SMike Rapoport }
24958d103963SMike Rapoport 
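/*
 * These two entry points back the userfaultfd UFFDIO_COPY and
 * UFFDIO_ZEROPAGE ioctls on shmem VMAs.  A minimal sketch of the
 * resolving side (assumes uffd is already registered over dst;
 * error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static void resolve_fault(int uffd, unsigned long dst, void *src,
			  unsigned long len)
{
	if (src) {
		struct uffdio_copy cp = {
			.dst = dst,
			.src = (unsigned long)src,
			.len = len,
		};
		/* -> shmem_mcopy_atomic_pte() */
		ioctl(uffd, UFFDIO_COPY, &cp);
	} else {
		struct uffdio_zeropage zp = {
			.range = { .start = dst, .len = len },
		};
		/* -> shmem_mfill_zeropage_pte() */
		ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
	}
}
#endif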
24961da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
249792e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
249869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24991da177e4SLinus Torvalds 
25006d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
25016d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
25026d9d88d0SJarkko Sakkinen #else
25036d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
25046d9d88d0SJarkko Sakkinen #endif
25056d9d88d0SJarkko Sakkinen 
25061da177e4SLinus Torvalds static int
2507800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2508800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2509800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
25101da177e4SLinus Torvalds {
2511800d15a5SNick Piggin 	struct inode *inode = mapping->host;
251240e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
251309cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
251440e041a2SDavid Herrmann 
251540e041a2SDavid Herrmann 	/* i_mutex is held by caller */
2516ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2517ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2518ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
251940e041a2SDavid Herrmann 			return -EPERM;
252040e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
252140e041a2SDavid Herrmann 			return -EPERM;
252240e041a2SDavid Herrmann 	}
252340e041a2SDavid Herrmann 
25249e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2525800d15a5SNick Piggin }
2526800d15a5SNick Piggin 
2527800d15a5SNick Piggin static int
2528800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2529800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2530800d15a5SNick Piggin 			struct page *page, void *fsdata)
2531800d15a5SNick Piggin {
2532800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2533800d15a5SNick Piggin 
2534800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2535800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2536800d15a5SNick Piggin 
2537ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2538800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2539800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2540800d8c63SKirill A. Shutemov 			int i;
2541800d8c63SKirill A. Shutemov 
2542800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2543800d8c63SKirill A. Shutemov 				if (head + i == page)
2544800d8c63SKirill A. Shutemov 					continue;
2545800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2546800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2547800d8c63SKirill A. Shutemov 			}
2548800d8c63SKirill A. Shutemov 		}
254909cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
255009cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2551ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
255209cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2553ec9516fbSHugh Dickins 		}
2554800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2555ec9516fbSHugh Dickins 	}
2556d3602444SHugh Dickins 	set_page_dirty(page);
25576746aff7SWu Fengguang 	unlock_page(page);
255809cbfeafSKirill A. Shutemov 	put_page(page);
2559d3602444SHugh Dickins 
2560800d15a5SNick Piggin 	return copied;
25611da177e4SLinus Torvalds }
25621da177e4SLinus Torvalds 
25632ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25641da177e4SLinus Torvalds {
25656e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25666e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25671da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
256841ffe5d5SHugh Dickins 	pgoff_t index;
256941ffe5d5SHugh Dickins 	unsigned long offset;
2570a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2571f7c1d074SGeert Uytterhoeven 	int error = 0;
2572cb66a7a1SAl Viro 	ssize_t retval = 0;
25736e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2574a0ee5ec5SHugh Dickins 
2575a0ee5ec5SHugh Dickins 	/*
2576a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2577a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2578a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2579a0ee5ec5SHugh Dickins 	 */
2580777eda2cSAl Viro 	if (!iter_is_iovec(to))
258175edd345SHugh Dickins 		sgp = SGP_CACHE;
25821da177e4SLinus Torvalds 
258309cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
258409cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25851da177e4SLinus Torvalds 
25861da177e4SLinus Torvalds 	for (;;) {
25871da177e4SLinus Torvalds 		struct page *page = NULL;
258841ffe5d5SHugh Dickins 		pgoff_t end_index;
258941ffe5d5SHugh Dickins 		unsigned long nr, ret;
25901da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25911da177e4SLinus Torvalds 
259209cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25931da177e4SLinus Torvalds 		if (index > end_index)
25941da177e4SLinus Torvalds 			break;
25951da177e4SLinus Torvalds 		if (index == end_index) {
259609cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25971da177e4SLinus Torvalds 			if (nr <= offset)
25981da177e4SLinus Torvalds 				break;
25991da177e4SLinus Torvalds 		}
26001da177e4SLinus Torvalds 
26019e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
26026e58e79dSAl Viro 		if (error) {
26036e58e79dSAl Viro 			if (error == -EINVAL)
26046e58e79dSAl Viro 				error = 0;
26051da177e4SLinus Torvalds 			break;
26061da177e4SLinus Torvalds 		}
260775edd345SHugh Dickins 		if (page) {
260875edd345SHugh Dickins 			if (sgp == SGP_CACHE)
260975edd345SHugh Dickins 				set_page_dirty(page);
2610d3602444SHugh Dickins 			unlock_page(page);
261175edd345SHugh Dickins 		}
26121da177e4SLinus Torvalds 
26131da177e4SLinus Torvalds 		/*
26141da177e4SLinus Torvalds 		 * We must re-evaluate i_size after getting the page, since reads
26151b1dcc1bSJes Sorensen 		 * (unlike writes) are called without i_mutex protection against truncate
26161da177e4SLinus Torvalds 		 */
261709cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
26181da177e4SLinus Torvalds 		i_size = i_size_read(inode);
261909cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
26201da177e4SLinus Torvalds 		if (index == end_index) {
262109cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
26221da177e4SLinus Torvalds 			if (nr <= offset) {
26231da177e4SLinus Torvalds 				if (page)
262409cbfeafSKirill A. Shutemov 					put_page(page);
26251da177e4SLinus Torvalds 				break;
26261da177e4SLinus Torvalds 			}
26271da177e4SLinus Torvalds 		}
26281da177e4SLinus Torvalds 		nr -= offset;
26291da177e4SLinus Torvalds 
26301da177e4SLinus Torvalds 		if (page) {
26311da177e4SLinus Torvalds 			/*
26321da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
26331da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
26341da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
26351da177e4SLinus Torvalds 			 */
26361da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
26371da177e4SLinus Torvalds 				flush_dcache_page(page);
26381da177e4SLinus Torvalds 			/*
26391da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
26401da177e4SLinus Torvalds 			 */
26411da177e4SLinus Torvalds 			if (!offset)
26421da177e4SLinus Torvalds 				mark_page_accessed(page);
2643b5810039SNick Piggin 		} else {
26441da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
264509cbfeafSKirill A. Shutemov 			get_page(page);
2646b5810039SNick Piggin 		}
26471da177e4SLinus Torvalds 
26481da177e4SLinus Torvalds 		/*
26491da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
26501da177e4SLinus Torvalds 		 * now we can copy it to user space...
26511da177e4SLinus Torvalds 		 */
26522ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
26536e58e79dSAl Viro 		retval += ret;
26541da177e4SLinus Torvalds 		offset += ret;
265509cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
265609cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26571da177e4SLinus Torvalds 
265809cbfeafSKirill A. Shutemov 		put_page(page);
26592ba5bbedSAl Viro 		if (!iov_iter_count(to))
26601da177e4SLinus Torvalds 			break;
26616e58e79dSAl Viro 		if (ret < nr) {
26626e58e79dSAl Viro 			error = -EFAULT;
26636e58e79dSAl Viro 			break;
26646e58e79dSAl Viro 		}
26651da177e4SLinus Torvalds 		cond_resched();
26661da177e4SLinus Torvalds 	}
26671da177e4SLinus Torvalds 
266809cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26696e58e79dSAl Viro 	file_accessed(file);
26706e58e79dSAl Viro 	return retval ? retval : error;
26711da177e4SLinus Torvalds }
26721da177e4SLinus Torvalds 
2673220f2ac9SHugh Dickins /*
26747f4446eeSMatthew Wilcox  * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2675220f2ac9SHugh Dickins  */
2676220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2677965c8e59SAndrew Morton 				    pgoff_t index, pgoff_t end, int whence)
2678220f2ac9SHugh Dickins {
2679220f2ac9SHugh Dickins 	struct page *page;
2680220f2ac9SHugh Dickins 	struct pagevec pvec;
2681220f2ac9SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
2682220f2ac9SHugh Dickins 	bool done = false;
2683220f2ac9SHugh Dickins 	int i;
2684220f2ac9SHugh Dickins 
268586679820SMel Gorman 	pagevec_init(&pvec);
2686220f2ac9SHugh Dickins 	pvec.nr = 1;		/* start small: we may be there already */
2687220f2ac9SHugh Dickins 	while (!done) {
26880cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
2689220f2ac9SHugh Dickins 					pvec.nr, pvec.pages, indices);
2690220f2ac9SHugh Dickins 		if (!pvec.nr) {
2691965c8e59SAndrew Morton 			if (whence == SEEK_DATA)
2692220f2ac9SHugh Dickins 				index = end;
2693220f2ac9SHugh Dickins 			break;
2694220f2ac9SHugh Dickins 		}
2695220f2ac9SHugh Dickins 		for (i = 0; i < pvec.nr; i++, index++) {
2696220f2ac9SHugh Dickins 			if (index < indices[i]) {
2697965c8e59SAndrew Morton 				if (whence == SEEK_HOLE) {
2698220f2ac9SHugh Dickins 					done = true;
2699220f2ac9SHugh Dickins 					break;
2700220f2ac9SHugh Dickins 				}
2701220f2ac9SHugh Dickins 				index = indices[i];
2702220f2ac9SHugh Dickins 			}
2703220f2ac9SHugh Dickins 			page = pvec.pages[i];
27043159f943SMatthew Wilcox 			if (page && !xa_is_value(page)) {
2705220f2ac9SHugh Dickins 				if (!PageUptodate(page))
2706220f2ac9SHugh Dickins 					page = NULL;
2707220f2ac9SHugh Dickins 			}
2708220f2ac9SHugh Dickins 			if (index >= end ||
2709965c8e59SAndrew Morton 			    (page && whence == SEEK_DATA) ||
2710965c8e59SAndrew Morton 			    (!page && whence == SEEK_HOLE)) {
2711220f2ac9SHugh Dickins 				done = true;
2712220f2ac9SHugh Dickins 				break;
2713220f2ac9SHugh Dickins 			}
2714220f2ac9SHugh Dickins 		}
27150cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
2716220f2ac9SHugh Dickins 		pagevec_release(&pvec);
2717220f2ac9SHugh Dickins 		pvec.nr = PAGEVEC_SIZE;
2718220f2ac9SHugh Dickins 		cond_resched();
2719220f2ac9SHugh Dickins 	}
2720220f2ac9SHugh Dickins 	return index;
2721220f2ac9SHugh Dickins }
2722220f2ac9SHugh Dickins 
2723965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2724220f2ac9SHugh Dickins {
2725220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2726220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2727220f2ac9SHugh Dickins 	pgoff_t start, end;
2728220f2ac9SHugh Dickins 	loff_t new_offset;
2729220f2ac9SHugh Dickins 
2730965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2731965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2732220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
27335955102cSAl Viro 	inode_lock(inode);
2734220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
2735220f2ac9SHugh Dickins 
27361a413646SYufen Yu 	if (offset < 0 || offset >= inode->i_size)
2737220f2ac9SHugh Dickins 		offset = -ENXIO;
2738220f2ac9SHugh Dickins 	else {
273909cbfeafSKirill A. Shutemov 		start = offset >> PAGE_SHIFT;
274009cbfeafSKirill A. Shutemov 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2741965c8e59SAndrew Morton 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
274209cbfeafSKirill A. Shutemov 		new_offset <<= PAGE_SHIFT;
2743220f2ac9SHugh Dickins 		if (new_offset > offset) {
2744220f2ac9SHugh Dickins 			if (new_offset < inode->i_size)
2745220f2ac9SHugh Dickins 				offset = new_offset;
2746965c8e59SAndrew Morton 			else if (whence == SEEK_DATA)
2747220f2ac9SHugh Dickins 				offset = -ENXIO;
2748220f2ac9SHugh Dickins 			else
2749220f2ac9SHugh Dickins 				offset = inode->i_size;
2750220f2ac9SHugh Dickins 		}
2751220f2ac9SHugh Dickins 	}
2752220f2ac9SHugh Dickins 
2753387aae6fSHugh Dickins 	if (offset >= 0)
275446a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
27555955102cSAl Viro 	inode_unlock(inode);
2756220f2ac9SHugh Dickins 	return offset;
2757220f2ac9SHugh Dickins }
2758220f2ac9SHugh Dickins 
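/*
 * A minimal sketch of SEEK_DATA/SEEK_HOLE over a sparse tmpfs file
 * (hypothetical path, error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT | O_TRUNC, 0600);

	ftruncate(fd, 8 << 20);		/* 8MB, all hole */
	pwrite(fd, "x", 1, 5 << 20);	/* allocate one page of data */
	lseek(fd, 0, SEEK_DATA);	/* -> 5MB (start of the data page) */
	lseek(fd, 5 << 20, SEEK_HOLE);	/* -> 5MB + PAGE_SIZE */
	return 0;
}
#endif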
275983e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
276083e4fa9cSHugh Dickins 							 loff_t len)
276183e4fa9cSHugh Dickins {
2762496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2763e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
276440e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
27651aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2766e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2767e2d12e22SHugh Dickins 	int error;
276883e4fa9cSHugh Dickins 
276913ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
277013ace4d0SHugh Dickins 		return -EOPNOTSUPP;
277113ace4d0SHugh Dickins 
27725955102cSAl Viro 	inode_lock(inode);
277383e4fa9cSHugh Dickins 
277483e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
277583e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
277683e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
277783e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
27788e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
277983e4fa9cSHugh Dickins 
278040e041a2SDavid Herrmann 		/* protected by i_mutex */
2781ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
278240e041a2SDavid Herrmann 			error = -EPERM;
278340e041a2SDavid Herrmann 			goto out;
278440e041a2SDavid Herrmann 		}
278540e041a2SDavid Herrmann 
27868e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2787aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2788f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2789f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2790f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2791f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2792f00cdc6dSHugh Dickins 
279383e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
279483e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
279583e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
279683e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
279783e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
27988e205f77SHugh Dickins 
27998e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
28008e205f77SHugh Dickins 		inode->i_private = NULL;
28018e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
28022055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
28038e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
280483e4fa9cSHugh Dickins 		error = 0;
28058e205f77SHugh Dickins 		goto out;
280683e4fa9cSHugh Dickins 	}
280783e4fa9cSHugh Dickins 
2808e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2809e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2810e2d12e22SHugh Dickins 	if (error)
2811e2d12e22SHugh Dickins 		goto out;
2812e2d12e22SHugh Dickins 
281340e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
281440e041a2SDavid Herrmann 		error = -EPERM;
281540e041a2SDavid Herrmann 		goto out;
281640e041a2SDavid Herrmann 	}
281740e041a2SDavid Herrmann 
281809cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
281909cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2820e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2821e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2822e2d12e22SHugh Dickins 		error = -ENOSPC;
2823e2d12e22SHugh Dickins 		goto out;
2824e2d12e22SHugh Dickins 	}
2825e2d12e22SHugh Dickins 
28268e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
28271aac1400SHugh Dickins 	shmem_falloc.start = start;
28281aac1400SHugh Dickins 	shmem_falloc.next  = start;
28291aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
28301aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
28311aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28321aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
28331aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
28341aac1400SHugh Dickins 
2835e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2836e2d12e22SHugh Dickins 		struct page *page;
2837e2d12e22SHugh Dickins 
2838e2d12e22SHugh Dickins 		/*
2839e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2840e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2841e2d12e22SHugh Dickins 		 */
2842e2d12e22SHugh Dickins 		if (signal_pending(current))
2843e2d12e22SHugh Dickins 			error = -EINTR;
28441aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
28451aac1400SHugh Dickins 			error = -ENOMEM;
2846e2d12e22SHugh Dickins 		else
28479e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2848e2d12e22SHugh Dickins 		if (error) {
28491635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
28507f556567SHugh Dickins 			if (index > start) {
28511635f6a7SHugh Dickins 				shmem_undo_range(inode,
285209cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2853b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
28547f556567SHugh Dickins 			}
28551aac1400SHugh Dickins 			goto undone;
2856e2d12e22SHugh Dickins 		}
2857e2d12e22SHugh Dickins 
2858e2d12e22SHugh Dickins 		/*
28591aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
28601aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
28611aac1400SHugh Dickins 		 */
28621aac1400SHugh Dickins 		shmem_falloc.next++;
28631aac1400SHugh Dickins 		if (!PageUptodate(page))
28641aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
28651aac1400SHugh Dickins 
28661aac1400SHugh Dickins 		/*
28671635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
28681635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
28691635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2870e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2871e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2872e2d12e22SHugh Dickins 		 */
2873e2d12e22SHugh Dickins 		set_page_dirty(page);
2874e2d12e22SHugh Dickins 		unlock_page(page);
287509cbfeafSKirill A. Shutemov 		put_page(page);
2876e2d12e22SHugh Dickins 		cond_resched();
2877e2d12e22SHugh Dickins 	}
2878e2d12e22SHugh Dickins 
2879e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2880e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2881078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
28821aac1400SHugh Dickins undone:
28831aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28841aac1400SHugh Dickins 	inode->i_private = NULL;
28851aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2886e2d12e22SHugh Dickins out:
28875955102cSAl Viro 	inode_unlock(inode);
288883e4fa9cSHugh Dickins 	return error;
288983e4fa9cSHugh Dickins }
289083e4fa9cSHugh Dickins 
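/*
 * A minimal sketch of the two fallocate modes handled above
 * (hypothetical path, error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/falloc-demo", O_RDWR | O_CREAT, 0600);

	/* Preallocate 1MB: pages are added !PageUptodate, so a failed
	 * or interrupted allocation can be rolled back cleanly. */
	fallocate(fd, 0, 0, 1 << 20);
	/* Punch the first half back out, keeping i_size: */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		  0, 1 << 19);
	return 0;
}
#endif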
2891726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
28921da177e4SLinus Torvalds {
2893726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
289609cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28971da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28980edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28991da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
290041ffe5d5SHugh Dickins 		buf->f_bavail =
290141ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
290241ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
29030edd73b3SHugh Dickins 	}
29040edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
29051da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
29061da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
29071da177e4SLinus Torvalds 	}
29081da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
29091da177e4SLinus Torvalds 	return 0;
29101da177e4SLinus Torvalds }
29111da177e4SLinus Torvalds 
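/*
 * A minimal sketch of reading these fields back (assumes a tmpfs
 * mounted at /dev/shm; error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs st;

	statfs("/dev/shm", &st);
	printf("bsize=%ld blocks=%ld bfree=%ld files=%ld ffree=%ld\n",
	       (long)st.f_bsize, (long)st.f_blocks, (long)st.f_bfree,
	       (long)st.f_files, (long)st.f_ffree);
	return 0;
}
#endif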
29121da177e4SLinus Torvalds /*
29131da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
29141da177e4SLinus Torvalds  */
29151da177e4SLinus Torvalds static int
29161a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
29171da177e4SLinus Torvalds {
29180b0a0806SHugh Dickins 	struct inode *inode;
29191da177e4SLinus Torvalds 	int error = -ENOSPC;
29201da177e4SLinus Torvalds 
2921454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
29221da177e4SLinus Torvalds 	if (inode) {
2923feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2924feda821eSChristoph Hellwig 		if (error)
2925feda821eSChristoph Hellwig 			goto out_iput;
29262a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
29279d8f13baSMimi Zohar 						     &dentry->d_name,
29286d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2929feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2930feda821eSChristoph Hellwig 			goto out_iput;
293137ec43cdSMimi Zohar 
2932718deb6bSAl Viro 		error = 0;
29331da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2934078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
29351da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
29361da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
29371da177e4SLinus Torvalds 	}
29381da177e4SLinus Torvalds 	return error;
2939feda821eSChristoph Hellwig out_iput:
2940feda821eSChristoph Hellwig 	iput(inode);
2941feda821eSChristoph Hellwig 	return error;
29421da177e4SLinus Torvalds }
29431da177e4SLinus Torvalds 
294460545d0dSAl Viro static int
294560545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
294660545d0dSAl Viro {
294760545d0dSAl Viro 	struct inode *inode;
294860545d0dSAl Viro 	int error = -ENOSPC;
294960545d0dSAl Viro 
295060545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
295160545d0dSAl Viro 	if (inode) {
295260545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
295360545d0dSAl Viro 						     NULL,
295460545d0dSAl Viro 						     shmem_initxattrs, NULL);
2955feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2956feda821eSChristoph Hellwig 			goto out_iput;
2957feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2958feda821eSChristoph Hellwig 		if (error)
2959feda821eSChristoph Hellwig 			goto out_iput;
296060545d0dSAl Viro 		d_tmpfile(dentry, inode);
296160545d0dSAl Viro 	}
296260545d0dSAl Viro 	return error;
2963feda821eSChristoph Hellwig out_iput:
2964feda821eSChristoph Hellwig 	iput(inode);
2965feda821eSChristoph Hellwig 	return error;
296660545d0dSAl Viro }
296760545d0dSAl Viro 
296818bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
29691da177e4SLinus Torvalds {
29701da177e4SLinus Torvalds 	int error;
29711da177e4SLinus Torvalds 
29721da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
29731da177e4SLinus Torvalds 		return error;
2974d8c76e6fSDave Hansen 	inc_nlink(dir);
29751da177e4SLinus Torvalds 	return 0;
29761da177e4SLinus Torvalds }
29771da177e4SLinus Torvalds 
29784acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2979ebfc3b49SAl Viro 		bool excl)
29801da177e4SLinus Torvalds {
29811da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
29821da177e4SLinus Torvalds }
29831da177e4SLinus Torvalds 
29841da177e4SLinus Torvalds /*
29851da177e4SLinus Torvalds  * Link a file..
29861da177e4SLinus Torvalds  */
29871da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
29881da177e4SLinus Torvalds {
298975c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
299029b00e60SDarrick J. Wong 	int ret = 0;
29911da177e4SLinus Torvalds 
29921da177e4SLinus Torvalds 	/*
29931da177e4SLinus Torvalds 	 * No ordinary (disk-based) filesystem counts links as inodes;
29941da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29951da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29961062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29971062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29981da177e4SLinus Torvalds 	 */
29991062af92SDarrick J. Wong 	if (inode->i_nlink) {
3000e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
30015b04c689SPavel Emelyanov 		if (ret)
30025b04c689SPavel Emelyanov 			goto out;
30031062af92SDarrick J. Wong 	}
30041da177e4SLinus Torvalds 
30051da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3006078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3007d8c76e6fSDave Hansen 	inc_nlink(inode);
30087de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
30091da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
30101da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30115b04c689SPavel Emelyanov out:
30125b04c689SPavel Emelyanov 	return ret;
30131da177e4SLinus Torvalds }
30141da177e4SLinus Torvalds 
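/*
 * The O_TMPFILE case above can be seen from userspace: the unnamed
 * inode's first link must not consume a second inode reservation.  A
 * minimal sketch using the /proc/self/fd trick (hypothetical names,
 * error handling omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
	char path[64];

	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	/* i_nlink is still 0 here, so shmem_link() skips the
	 * shmem_reserve_inode() call for this first link: */
	linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/now-visible",
	       AT_SYMLINK_FOLLOW);
	return 0;
}
#endif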
30151da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
30161da177e4SLinus Torvalds {
301775c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
30181da177e4SLinus Torvalds 
30195b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
30205b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
3023078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
30249a53c3a7SDave Hansen 	drop_nlink(inode);
30251da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
30261da177e4SLinus Torvalds 	return 0;
30271da177e4SLinus Torvalds }
30281da177e4SLinus Torvalds 
30291da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
30301da177e4SLinus Torvalds {
30311da177e4SLinus Torvalds 	if (!simple_empty(dentry))
30321da177e4SLinus Torvalds 		return -ENOTEMPTY;
30331da177e4SLinus Torvalds 
303475c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
30359a53c3a7SDave Hansen 	drop_nlink(dir);
30361da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
30371da177e4SLinus Torvalds }
30381da177e4SLinus Torvalds 
303937456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
304037456771SMiklos Szeredi {
3041e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
3042e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
304337456771SMiklos Szeredi 
304437456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
304537456771SMiklos Szeredi 		if (old_is_dir) {
304637456771SMiklos Szeredi 			drop_nlink(old_dir);
304737456771SMiklos Szeredi 			inc_nlink(new_dir);
304837456771SMiklos Szeredi 		} else {
304937456771SMiklos Szeredi 			drop_nlink(new_dir);
305037456771SMiklos Szeredi 			inc_nlink(old_dir);
305137456771SMiklos Szeredi 		}
305237456771SMiklos Szeredi 	}
305337456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
305437456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
305575c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
3056078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
305737456771SMiklos Szeredi 
305837456771SMiklos Szeredi 	return 0;
305937456771SMiklos Szeredi }
306037456771SMiklos Szeredi 
306146fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
306246fdb794SMiklos Szeredi {
306346fdb794SMiklos Szeredi 	struct dentry *whiteout;
306446fdb794SMiklos Szeredi 	int error;
306546fdb794SMiklos Szeredi 
306646fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
306746fdb794SMiklos Szeredi 	if (!whiteout)
306846fdb794SMiklos Szeredi 		return -ENOMEM;
306946fdb794SMiklos Szeredi 
307046fdb794SMiklos Szeredi 	error = shmem_mknod(old_dir, whiteout,
307146fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
307246fdb794SMiklos Szeredi 	dput(whiteout);
307346fdb794SMiklos Szeredi 	if (error)
307446fdb794SMiklos Szeredi 		return error;
307546fdb794SMiklos Szeredi 
307646fdb794SMiklos Szeredi 	/*
307746fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
307846fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
307946fdb794SMiklos Szeredi 	 *
308046fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
308146fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
308246fdb794SMiklos Szeredi 	 */
308346fdb794SMiklos Szeredi 	d_rehash(whiteout);
308446fdb794SMiklos Szeredi 	return 0;
308546fdb794SMiklos Szeredi }
308646fdb794SMiklos Szeredi 
30871da177e4SLinus Torvalds /*
30881da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
30891da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30901da177e4SLinus Torvalds  * it exists so that the VFS layer correctly free's it when it
30911da177e4SLinus Torvalds  * it exists so that the VFS layer correctly frees it when it
30921da177e4SLinus Torvalds  */
30933b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
30941da177e4SLinus Torvalds {
309575c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30961da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30971da177e4SLinus Torvalds 
309846fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30993b69ff51SMiklos Szeredi 		return -EINVAL;
31003b69ff51SMiklos Szeredi 
310137456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
310237456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
310337456771SMiklos Szeredi 
31041da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
31051da177e4SLinus Torvalds 		return -ENOTEMPTY;
31061da177e4SLinus Torvalds 
310746fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
310846fdb794SMiklos Szeredi 		int error;
310946fdb794SMiklos Szeredi 
311046fdb794SMiklos Szeredi 		error = shmem_whiteout(old_dir, old_dentry);
311146fdb794SMiklos Szeredi 		if (error)
311246fdb794SMiklos Szeredi 			return error;
311346fdb794SMiklos Szeredi 	}
311446fdb794SMiklos Szeredi 
311575c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
31161da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3117b928095bSMiklos Szeredi 		if (they_are_dirs) {
311875c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
31199a53c3a7SDave Hansen 			drop_nlink(old_dir);
3120b928095bSMiklos Szeredi 		}
31211da177e4SLinus Torvalds 	} else if (they_are_dirs) {
31229a53c3a7SDave Hansen 		drop_nlink(old_dir);
3123d8c76e6fSDave Hansen 		inc_nlink(new_dir);
31241da177e4SLinus Torvalds 	}
31251da177e4SLinus Torvalds 
31261da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
31271da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
31281da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
31291da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3130078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
31311da177e4SLinus Torvalds 	return 0;
31321da177e4SLinus Torvalds }
31331da177e4SLinus Torvalds 
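/*
 * A minimal sketch of the rename flavours handled above (assumes the
 * source files exist and glibc >= 2.28 for the renameat2() wrapper;
 * RENAME_WHITEOUT may additionally need privilege; error handling
 * omitted):
 */
#if 0	/* illustrative userspace sketch, not part of this file */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* Plain replace-if-exists rename: */
	renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/b", 0);
	/* Atomic swap of two existing dentries (shmem_exchange()): */
	renameat2(AT_FDCWD, "/dev/shm/b", AT_FDCWD, "/dev/shm/c",
		  RENAME_EXCHANGE);
	/* Move away, leaving a whiteout behind (shmem_whiteout()): */
	renameat2(AT_FDCWD, "/dev/shm/c", AT_FDCWD, "/dev/shm/d",
		  RENAME_WHITEOUT);
	return 0;
}
#endif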
31341da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
31351da177e4SLinus Torvalds {
31361da177e4SLinus Torvalds 	int error;
31371da177e4SLinus Torvalds 	int len;
31381da177e4SLinus Torvalds 	struct inode *inode;
31399276aad6SHugh Dickins 	struct page *page;
31401da177e4SLinus Torvalds 
31411da177e4SLinus Torvalds 	len = strlen(symname) + 1;
314209cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
31431da177e4SLinus Torvalds 		return -ENAMETOOLONG;
31441da177e4SLinus Torvalds 
31450825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
31460825a6f9SJoe Perches 				VM_NORESERVE);
31471da177e4SLinus Torvalds 	if (!inode)
31481da177e4SLinus Torvalds 		return -ENOSPC;
31491da177e4SLinus Torvalds 
31509d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
31516d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3152343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3153570bc1c2SStephen Smalley 		iput(inode);
3154570bc1c2SStephen Smalley 		return error;
3155570bc1c2SStephen Smalley 	}
3156570bc1c2SStephen Smalley 
31571da177e4SLinus Torvalds 	inode->i_size = len-1;
315869f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
31593ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
31603ed47db3SAl Viro 		if (!inode->i_link) {
316169f07ec9SHugh Dickins 			iput(inode);
316269f07ec9SHugh Dickins 			return -ENOMEM;
316369f07ec9SHugh Dickins 		}
316469f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
31651da177e4SLinus Torvalds 	} else {
3166e8ecde25SAl Viro 		inode_nohighmem(inode);
31679e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
31681da177e4SLinus Torvalds 		if (error) {
31691da177e4SLinus Torvalds 			iput(inode);
31701da177e4SLinus Torvalds 			return error;
31711da177e4SLinus Torvalds 		}
317214fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
31731da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
317421fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3175ec9516fbSHugh Dickins 		SetPageUptodate(page);
31761da177e4SLinus Torvalds 		set_page_dirty(page);
31776746aff7SWu Fengguang 		unlock_page(page);
317809cbfeafSKirill A. Shutemov 		put_page(page);
31791da177e4SLinus Torvalds 	}
31801da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3181078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
31821da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31831da177e4SLinus Torvalds 	dget(dentry);
31841da177e4SLinus Torvalds 	return 0;
31851da177e4SLinus Torvalds }
31861da177e4SLinus Torvalds 
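/*
 * Note on the two symlink representations chosen above (illustrative):
 * a target that fits in SHORT_SYMLINK_LEN bytes (including the NUL) is
 * kmemdup()ed straight into inode->i_link and served by
 * simple_get_link(), with no page allocated at all; anything longer is
 * written into page 0 of the inode's mapping, so it can be swapped out
 * like ordinary tmpfs data.  For example:
 *
 *	symlink("short-target", "/tmp/s");	inline i_link copy
 *	symlink(<a ~1k path>,   "/tmp/l");	backed by page 0
 */
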
3187fceef393SAl Viro static void shmem_put_link(void *arg)
3188fceef393SAl Viro {
3189fceef393SAl Viro 	mark_page_accessed(arg);
3190fceef393SAl Viro 	put_page(arg);
3191fceef393SAl Viro }
3192fceef393SAl Viro 
31936b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3194fceef393SAl Viro 				  struct inode *inode,
3195fceef393SAl Viro 				  struct delayed_call *done)
31961da177e4SLinus Torvalds {
31971da177e4SLinus Torvalds 	struct page *page = NULL;
31986b255391SAl Viro 	int error;
31996a6c9904SAl Viro 	if (!dentry) {
32006a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
32016a6c9904SAl Viro 		if (!page)
32026b255391SAl Viro 			return ERR_PTR(-ECHILD);
32036a6c9904SAl Viro 		if (!PageUptodate(page)) {
32046a6c9904SAl Viro 			put_page(page);
32056a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
32066a6c9904SAl Viro 		}
32076a6c9904SAl Viro 	} else {
32089e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3209680baacbSAl Viro 		if (error)
3210680baacbSAl Viro 			return ERR_PTR(error);
3211d3602444SHugh Dickins 		unlock_page(page);
32121da177e4SLinus Torvalds 	}
3213fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
321421fc61c7SAl Viro 	return page_address(page);
32151da177e4SLinus Torvalds }
32161da177e4SLinus Torvalds 
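/*
 * Sketch of the lifetime handling above (illustrative): in RCU-walk
 * mode (!dentry) we must not sleep, hence the -ECHILD returns that
 * make the VFS retry in ref-walk mode when the page is absent or not
 * uptodate.  The delayed_call means the caller, not us, decides when
 * the link body may go away:
 *
 *	link = inode->i_op->get_link(dentry, inode, &done);
 *	... use link ...
 *	do_delayed_call(&done);		invokes shmem_put_link(page)
 */
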
3217b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3218b09e0fa4SEric Paris /*
3219b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3220b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3221b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3222b09e0fa4SEric Paris  * filesystem level, though.
3223b09e0fa4SEric Paris  */
3224b09e0fa4SEric Paris 
32256d9d88d0SJarkko Sakkinen /*
32266d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
32276d9d88d0SJarkko Sakkinen  */
32286d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
32296d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
32306d9d88d0SJarkko Sakkinen 			    void *fs_info)
32316d9d88d0SJarkko Sakkinen {
32326d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
32336d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
323438f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
32356d9d88d0SJarkko Sakkinen 	size_t len;
32366d9d88d0SJarkko Sakkinen 
32376d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
323838f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
32396d9d88d0SJarkko Sakkinen 		if (!new_xattr)
32406d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32416d9d88d0SJarkko Sakkinen 
32426d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
32436d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
32446d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
32456d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
32463bef735aSChengguang Xu 			kvfree(new_xattr);
32476d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32486d9d88d0SJarkko Sakkinen 		}
32496d9d88d0SJarkko Sakkinen 
32506d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
32516d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
32526d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
32536d9d88d0SJarkko Sakkinen 		       xattr->name, len);
32546d9d88d0SJarkko Sakkinen 
325538f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
32566d9d88d0SJarkko Sakkinen 	}
32576d9d88d0SJarkko Sakkinen 
32586d9d88d0SJarkko Sakkinen 	return 0;
32596d9d88d0SJarkko Sakkinen }
32606d9d88d0SJarkko Sakkinen 
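/*
 * Worked example (illustrative): the LSM hands us suffix-only names,
 * and the loop above re-adds the "security." prefix.  With SELinux, an
 * xattr_array entry of name "selinux" and value "system_u:..." ends up
 * stored in info->xattrs as
 *
 *	"security.selinux" = "system_u:object_r:tmpfs_t:s0"
 *
 * which is what listxattr()/getxattr() later report for the inode.
 */
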
3261aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3262b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3263b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3264aa7c5241SAndreas Gruenbacher {
3265b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3266aa7c5241SAndreas Gruenbacher 
3267aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3268aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3269aa7c5241SAndreas Gruenbacher }
3270aa7c5241SAndreas Gruenbacher 
3271aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
327259301226SAl Viro 				   struct dentry *unused, struct inode *inode,
327359301226SAl Viro 				   const char *name, const void *value,
327459301226SAl Viro 				   size_t size, int flags)
3275aa7c5241SAndreas Gruenbacher {
327659301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3277aa7c5241SAndreas Gruenbacher 
3278aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3279a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3280aa7c5241SAndreas Gruenbacher }
3281aa7c5241SAndreas Gruenbacher 
3282aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3283aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3284aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3285aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3286aa7c5241SAndreas Gruenbacher };
3287aa7c5241SAndreas Gruenbacher 
3288aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3289aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3290aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3291aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3292aa7c5241SAndreas Gruenbacher };
3293aa7c5241SAndreas Gruenbacher 
3294b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3295b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3296feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3297feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3298b09e0fa4SEric Paris #endif
3299aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3300aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3301b09e0fa4SEric Paris 	NULL
3302b09e0fa4SEric Paris };
3303b09e0fa4SEric Paris 
3304b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3305b09e0fa4SEric Paris {
330675c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3307786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3308b09e0fa4SEric Paris }
3309b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3310b09e0fa4SEric Paris 
331169f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
33126b255391SAl Viro 	.get_link	= simple_get_link,
3313b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3314b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3315b09e0fa4SEric Paris #endif
33161da177e4SLinus Torvalds };
33171da177e4SLinus Torvalds 
331892e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
33196b255391SAl Viro 	.get_link	= shmem_get_link,
3320b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3321b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
332239f0247dSAndreas Gruenbacher #endif
3323b09e0fa4SEric Paris };
332439f0247dSAndreas Gruenbacher 
332591828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
332691828a40SDavid M. Grimes {
332791828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
332891828a40SDavid M. Grimes }
332991828a40SDavid M. Grimes 
333091828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
333191828a40SDavid M. Grimes {
333291828a40SDavid M. Grimes 	__u32 *fh = vfh;
333391828a40SDavid M. Grimes 	__u64 inum = fh[2];
333491828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
333591828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
333691828a40SDavid M. Grimes }
333791828a40SDavid M. Grimes 
333812ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
333912ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
334012ba780dSAmir Goldstein {
334112ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
334212ba780dSAmir Goldstein 
334312ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
334412ba780dSAmir Goldstein }
334512ba780dSAmir Goldstein 
3347480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3348480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
334991828a40SDavid M. Grimes {
335091828a40SDavid M. Grimes 	struct inode *inode;
3351480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
335235c2a7f4SHugh Dickins 	u64 inum;
335391828a40SDavid M. Grimes 
3354480b116cSChristoph Hellwig 	if (fh_len < 3)
3355480b116cSChristoph Hellwig 		return NULL;
3356480b116cSChristoph Hellwig 
335735c2a7f4SHugh Dickins 	inum = fid->raw[2];
335835c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
335935c2a7f4SHugh Dickins 
3360480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3361480b116cSChristoph Hellwig 			shmem_match, fid->raw);
336291828a40SDavid M. Grimes 	if (inode) {
336312ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
336491828a40SDavid M. Grimes 		iput(inode);
336591828a40SDavid M. Grimes 	}
336691828a40SDavid M. Grimes 
3367480b116cSChristoph Hellwig 	return dentry;
336891828a40SDavid M. Grimes }
336991828a40SDavid M. Grimes 
3370b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3371b0b0382bSAl Viro 				struct inode *parent)
337291828a40SDavid M. Grimes {
33735fe0c237SAneesh Kumar K.V 	if (*len < 3) {
33745fe0c237SAneesh Kumar K.V 		*len = 3;
337594e07a75SNamjae Jeon 		return FILEID_INVALID;
33765fe0c237SAneesh Kumar K.V 	}
337791828a40SDavid M. Grimes 
33781d3382cbSAl Viro 	if (inode_unhashed(inode)) {
337991828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
338091828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
338191828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
338291828a40SDavid M. Grimes 		 * to do it once
338391828a40SDavid M. Grimes 		 */
338491828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
338591828a40SDavid M. Grimes 		spin_lock(&lock);
33861d3382cbSAl Viro 		if (inode_unhashed(inode))
338791828a40SDavid M. Grimes 			__insert_inode_hash(inode,
338891828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
338991828a40SDavid M. Grimes 		spin_unlock(&lock);
339091828a40SDavid M. Grimes 	}
339191828a40SDavid M. Grimes 
339291828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
339391828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
339491828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
339591828a40SDavid M. Grimes 
339691828a40SDavid M. Grimes 	*len = 3;
339791828a40SDavid M. Grimes 	return 1;
339891828a40SDavid M. Grimes }
339991828a40SDavid M. Grimes 
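/*
 * Illustrative layout of the 3-word file handle built above, and how
 * shmem_fh_to_dentry()/shmem_match() undo it (values are examples):
 *
 *	fh[0] = i_generation		e.g. 0x5eadbeef
 *	fh[1] = i_ino & 0xffffffff	e.g. 0x00001234
 *	fh[2] = i_ino >> 32		e.g. 0x00000001
 *
 * so inum = (fh[2] << 32) | fh[1] = 0x100001234, and the inode is
 * found via ilookup5() under the same hash key, i_ino + i_generation,
 * that shmem_encode_fh() used when inserting it into the hash.
 */
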
340039655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
340191828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
340291828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3403480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
340491828a40SDavid M. Grimes };
340591828a40SDavid M. Grimes 
3406626c3920SAl Viro enum shmem_param {
3407626c3920SAl Viro 	Opt_gid,
3408626c3920SAl Viro 	Opt_huge,
3409626c3920SAl Viro 	Opt_mode,
3410626c3920SAl Viro 	Opt_mpol,
3411626c3920SAl Viro 	Opt_nr_blocks,
3412626c3920SAl Viro 	Opt_nr_inodes,
3413626c3920SAl Viro 	Opt_size,
3414626c3920SAl Viro 	Opt_uid,
3415ea3271f7SChris Down 	Opt_inode32,
3416ea3271f7SChris Down 	Opt_inode64,
3417626c3920SAl Viro };
34181da177e4SLinus Torvalds 
34195eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
34202710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
34212710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
34222710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
34232710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
34242710c957SAl Viro 	{}
34252710c957SAl Viro };
34262710c957SAl Viro 
3427d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3428626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
34292710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3430626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3431626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3432626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3433626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3434626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3435626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3436ea3271f7SChris Down 	fsparam_flag  ("inode32",	Opt_inode32),
3437ea3271f7SChris Down 	fsparam_flag  ("inode64",	Opt_inode64),
3438626c3920SAl Viro 	{}
3439626c3920SAl Viro };
3440626c3920SAl Viro 
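/*
 * Example mount lines these parameters accept (illustrative; the exact
 * mpol syntax is whatever mpol_parse_str() understands):
 *
 *	mount -t tmpfs -o size=50%,mode=1777 tmpfs /tmp
 *	mount -t tmpfs -o nr_inodes=1m,huge=within_size tmpfs /mnt
 *	mount -t tmpfs -o mpol=interleave:0-3,inode64 tmpfs /mnt
 */
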
3441f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3442626c3920SAl Viro {
3443f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3444626c3920SAl Viro 	struct fs_parse_result result;
3445e04dc423SAl Viro 	unsigned long long size;
3446626c3920SAl Viro 	char *rest;
3447626c3920SAl Viro 	int opt;
3448626c3920SAl Viro 
3449d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3450f3235626SDavid Howells 	if (opt < 0)
3451626c3920SAl Viro 		return opt;
3452626c3920SAl Viro 
3453626c3920SAl Viro 	switch (opt) {
3454626c3920SAl Viro 	case Opt_size:
3455626c3920SAl Viro 		size = memparse(param->string, &rest);
3456e04dc423SAl Viro 		if (*rest == '%') {
3457e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3458e04dc423SAl Viro 			size *= totalram_pages();
3459e04dc423SAl Viro 			do_div(size, 100);
3460e04dc423SAl Viro 			rest++;
3461e04dc423SAl Viro 		}
3462e04dc423SAl Viro 		if (*rest)
3463626c3920SAl Viro 			goto bad_value;
3464e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3465e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3466626c3920SAl Viro 		break;
3467626c3920SAl Viro 	case Opt_nr_blocks:
3468626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
3469e04dc423SAl Viro 		if (*rest)
3470626c3920SAl Viro 			goto bad_value;
3471e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3472626c3920SAl Viro 		break;
3473626c3920SAl Viro 	case Opt_nr_inodes:
3474626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3475e04dc423SAl Viro 		if (*rest)
3476626c3920SAl Viro 			goto bad_value;
3477e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3478626c3920SAl Viro 		break;
3479626c3920SAl Viro 	case Opt_mode:
3480626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3481626c3920SAl Viro 		break;
3482626c3920SAl Viro 	case Opt_uid:
3483626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3484e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3485626c3920SAl Viro 			goto bad_value;
3486626c3920SAl Viro 		break;
3487626c3920SAl Viro 	case Opt_gid:
3488626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3489e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3490626c3920SAl Viro 			goto bad_value;
3491626c3920SAl Viro 		break;
3492626c3920SAl Viro 	case Opt_huge:
3493626c3920SAl Viro 		ctx->huge = result.uint_32;
3494626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3495396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3496626c3920SAl Viro 		      has_transparent_hugepage()))
3497626c3920SAl Viro 			goto unsupported_parameter;
3498e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3499626c3920SAl Viro 		break;
3500626c3920SAl Viro 	case Opt_mpol:
3501626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3502e04dc423SAl Viro 			mpol_put(ctx->mpol);
3503e04dc423SAl Viro 			ctx->mpol = NULL;
3504626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3505626c3920SAl Viro 				goto bad_value;
3506626c3920SAl Viro 			break;
3507626c3920SAl Viro 		}
3508626c3920SAl Viro 		goto unsupported_parameter;
3509ea3271f7SChris Down 	case Opt_inode32:
3510ea3271f7SChris Down 		ctx->full_inums = false;
3511ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3512ea3271f7SChris Down 		break;
3513ea3271f7SChris Down 	case Opt_inode64:
3514ea3271f7SChris Down 		if (sizeof(ino_t) < 8) {
3515ea3271f7SChris Down 			return invalfc(fc,
3516ea3271f7SChris Down 				       "Cannot use inode64 with <64bit inums in kernel\n");
3517ea3271f7SChris Down 		}
3518ea3271f7SChris Down 		ctx->full_inums = true;
3519ea3271f7SChris Down 		ctx->seen |= SHMEM_SEEN_INUMS;
3520ea3271f7SChris Down 		break;
3521e04dc423SAl Viro 	}
3522e04dc423SAl Viro 	return 0;
3523e04dc423SAl Viro 
3524626c3920SAl Viro unsupported_parameter:
3525f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3526626c3920SAl Viro bad_value:
3527f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3528e04dc423SAl Viro }
3529e04dc423SAl Viro 
3530f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3531e04dc423SAl Viro {
3532f3235626SDavid Howells 	char *options = data;
3533f3235626SDavid Howells 
353433f37c64SAl Viro 	if (options) {
353533f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
353633f37c64SAl Viro 		if (err)
353733f37c64SAl Viro 			return err;
353833f37c64SAl Viro 	}
353933f37c64SAl Viro 
3540b00dc3adSHugh Dickins 	while (options != NULL) {
3541626c3920SAl Viro 		char *this_char = options;
3542b00dc3adSHugh Dickins 		for (;;) {
3543b00dc3adSHugh Dickins 			/*
3544b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3545b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3546b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3547b00dc3adSHugh Dickins 			 */
3548b00dc3adSHugh Dickins 			options = strchr(options, ',');
3549b00dc3adSHugh Dickins 			if (options == NULL)
3550b00dc3adSHugh Dickins 				break;
3551b00dc3adSHugh Dickins 			options++;
3552b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3553b00dc3adSHugh Dickins 				options[-1] = '\0';
3554b00dc3adSHugh Dickins 				break;
3555b00dc3adSHugh Dickins 			}
3556b00dc3adSHugh Dickins 		}
3557626c3920SAl Viro 		if (*this_char) {
3558626c3920SAl Viro 			char *value = strchr(this_char,'=');
3559f3235626SDavid Howells 			size_t len = 0;
3560626c3920SAl Viro 			int err;
3561626c3920SAl Viro 
3562626c3920SAl Viro 			if (value) {
3563626c3920SAl Viro 				*value++ = '\0';
3564f3235626SDavid Howells 				len = strlen(value);
35651da177e4SLinus Torvalds 			}
3566f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3567f3235626SDavid Howells 			if (err < 0)
3568f3235626SDavid Howells 				return err;
35691da177e4SLinus Torvalds 		}
3570626c3920SAl Viro 	}
35711da177e4SLinus Torvalds 	return 0;
35721da177e4SLinus Torvalds }
35731da177e4SLinus Torvalds 
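/*
 * Worked example of the comma handling above (illustrative): given the
 * monolithic string
 *
 *	"mpol=bind:0-2,5,size=1g"
 *
 * the first ',' is followed by the digit '5', so it is kept as part of
 * the mpol nodelist; the second ',' is followed by 's' and gets
 * NUL-terminated, yielding the two options "mpol=bind:0-2,5" and
 * "size=1g".
 */
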
3574f3235626SDavid Howells /*
3575f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3576f3235626SDavid Howells  *
3577f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3578f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3579f3235626SDavid Howells  * that case we have no record of how much is already in use.
3580f3235626SDavid Howells  */
3581f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
35821da177e4SLinus Torvalds {
3583f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3584f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
35850edd73b3SHugh Dickins 	unsigned long inodes;
3586f3235626SDavid Howells 	const char *err;
35870edd73b3SHugh Dickins 
35880edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
35890edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3590f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3591f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3592f3235626SDavid Howells 			err = "Cannot retroactively limit size";
35930edd73b3SHugh Dickins 			goto out;
35940b5071ddSAl Viro 		}
3595f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3596f3235626SDavid Howells 					   ctx->blocks) > 0) {
3597f3235626SDavid Howells 			err = "Too small a size for current use";
35980b5071ddSAl Viro 			goto out;
3599f3235626SDavid Howells 		}
3600f3235626SDavid Howells 	}
3601f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3602f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3603f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
36040b5071ddSAl Viro 			goto out;
36050b5071ddSAl Viro 		}
3606f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3607f3235626SDavid Howells 			err = "Too few inodes for current use";
3608f3235626SDavid Howells 			goto out;
3609f3235626SDavid Howells 		}
3610f3235626SDavid Howells 	}
36110edd73b3SHugh Dickins 
3612ea3271f7SChris Down 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3613ea3271f7SChris Down 	    sbinfo->next_ino > UINT_MAX) {
3614ea3271f7SChris Down 		err = "Current inum too high to switch to 32-bit inums";
3615ea3271f7SChris Down 		goto out;
3616ea3271f7SChris Down 	}
3617ea3271f7SChris Down 
3618f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3619f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3620ea3271f7SChris Down 	if (ctx->seen & SHMEM_SEEN_INUMS)
3621ea3271f7SChris Down 		sbinfo->full_inums = ctx->full_inums;
3622f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3623f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3624f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3625f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3626f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
36270b5071ddSAl Viro 	}
362871fe804bSLee Schermerhorn 
36295f00110fSGreg Thelen 	/*
36305f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
36315f00110fSGreg Thelen 	 */
3632f3235626SDavid Howells 	if (ctx->mpol) {
363371fe804bSLee Schermerhorn 		mpol_put(sbinfo->mpol);
3634f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3635f3235626SDavid Howells 		ctx->mpol = NULL;
36365f00110fSGreg Thelen 	}
3637f3235626SDavid Howells 	spin_unlock(&sbinfo->stat_lock);
3638f3235626SDavid Howells 	return 0;
36390edd73b3SHugh Dickins out:
36400edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
3641f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
36421da177e4SLinus Torvalds }
3643680d794bSakpm@linux-foundation.org 
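/*
 * Example remount outcomes under the rules above (illustrative, for a
 * mount created with size=1g that currently holds 512M of data):
 *
 *	mount -o remount,size=2g /mnt	succeeds, limit raised
 *	mount -o remount,size=256m /mnt	fails: "Too small a size for
 *					current use"
 *
 * and on a mount created with size=0 (unlimited), any remount with a
 * nonzero size fails with "Cannot retroactively limit size", since
 * nothing has been accounted so far.
 */
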
364434c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3645680d794bSakpm@linux-foundation.org {
364634c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3647680d794bSakpm@linux-foundation.org 
3648680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3649680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
365009cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3651680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3652680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
36530825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
365409208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
36558751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
36568751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
36578751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
36588751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
36598751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
36608751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3661ea3271f7SChris Down 
3662ea3271f7SChris Down 	/*
3663ea3271f7SChris Down 	 * Showing inode{64,32} might be useful even if it's the system default,
3664ea3271f7SChris Down 	 * since then people don't have to resort to checking both here and
3665ea3271f7SChris Down 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
3666ea3271f7SChris Down 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3667ea3271f7SChris Down 	 *
3668ea3271f7SChris Down 	 * We hide it when inode64 isn't the default and we are using 32-bit
3669ea3271f7SChris Down 	 * inodes, since that probably just means the feature isn't even under
3670ea3271f7SChris Down 	 * consideration.
3671ea3271f7SChris Down 	 *
3672ea3271f7SChris Down 	 * As such:
3673ea3271f7SChris Down 	 *
3674ea3271f7SChris Down 	 *                     +-----------------+-----------------+
3675ea3271f7SChris Down 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3676ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3677ea3271f7SChris Down 	 *  | full_inums=true  | show            | show            |
3678ea3271f7SChris Down 	 *  | full_inums=false | show            | hide            |
3679ea3271f7SChris Down 	 *  +------------------+-----------------+-----------------+
3680ea3271f7SChris Down 	 *
3681ea3271f7SChris Down 	 */
3682ea3271f7SChris Down 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3683ea3271f7SChris Down 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3684396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
36855a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
36865a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
36875a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
36885a6e75f8SKirill A. Shutemov #endif
368971fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3690680d794bSakpm@linux-foundation.org 	return 0;
3691680d794bSakpm@linux-foundation.org }
36929183df25SDavid Herrmann 
3693680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
36941da177e4SLinus Torvalds 
36951da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
36961da177e4SLinus Torvalds {
3697602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3698602586a8SHugh Dickins 
3699e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3700602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
370149cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3702602586a8SHugh Dickins 	kfree(sbinfo);
37031da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
37041da177e4SLinus Torvalds }
37051da177e4SLinus Torvalds 
3706f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
37071da177e4SLinus Torvalds {
3708f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
37091da177e4SLinus Torvalds 	struct inode *inode;
37100edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3711680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3712680d794bSakpm@linux-foundation.org 
3713680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3714425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3715680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3716680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3717680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3718680d794bSakpm@linux-foundation.org 
3719680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
37201da177e4SLinus Torvalds 
37210edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
37221da177e4SLinus Torvalds 	/*
37231da177e4SLinus Torvalds 	 * By default we allow only half of the physical RAM per
37241da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
37251da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
37261da177e4SLinus Torvalds 	 */
37271751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3728f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3729f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3730f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3731f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3732ea3271f7SChris Down 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3733ea3271f7SChris Down 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3734ca4e0519SAl Viro 	} else {
37351751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
37361da177e4SLinus Torvalds 	}
373791828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
37381751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
37390edd73b3SHugh Dickins #else
37401751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
37410edd73b3SHugh Dickins #endif
3742f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3743f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3744e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3745e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3746e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3747e809d5f0SChris Down 			goto failed;
3748e809d5f0SChris Down 	}
3749f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3750f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3751ea3271f7SChris Down 	sbinfo->full_inums = ctx->full_inums;
3752f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3753f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3754f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3755f3235626SDavid Howells 	ctx->mpol = NULL;
37561da177e4SLinus Torvalds 
37571da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
3758908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3759602586a8SHugh Dickins 		goto failed;
3760779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3761779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
37621da177e4SLinus Torvalds 
3763285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
376409cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
376509cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
37661da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
37671da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3768cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3769b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
377039f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3771b09e0fa4SEric Paris #endif
3772b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
37731751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
377439f0247dSAndreas Gruenbacher #endif
37752b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
37760edd73b3SHugh Dickins 
3777454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
37781da177e4SLinus Torvalds 	if (!inode)
37791da177e4SLinus Torvalds 		goto failed;
3780680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3781680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3782318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3783318ceed0SAl Viro 	if (!sb->s_root)
378448fde701SAl Viro 		goto failed;
37851da177e4SLinus Torvalds 	return 0;
37861da177e4SLinus Torvalds 
37871da177e4SLinus Torvalds failed:
37881da177e4SLinus Torvalds 	shmem_put_super(sb);
37891da177e4SLinus Torvalds 	return err;
37901da177e4SLinus Torvalds }
37911da177e4SLinus Torvalds 
3792f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3793f3235626SDavid Howells {
3794f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3795f3235626SDavid Howells }
3796f3235626SDavid Howells 
3797f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3798f3235626SDavid Howells {
3799f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3800f3235626SDavid Howells 
3801f3235626SDavid Howells 	if (ctx) {
3802f3235626SDavid Howells 		mpol_put(ctx->mpol);
3803f3235626SDavid Howells 		kfree(ctx);
3804f3235626SDavid Howells 	}
3805f3235626SDavid Howells }
3806f3235626SDavid Howells 
3807f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3808f3235626SDavid Howells 	.free			= shmem_free_fc,
3809f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3810f3235626SDavid Howells #ifdef CONFIG_TMPFS
3811f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3812f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3813f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3814f3235626SDavid Howells #endif
3815f3235626SDavid Howells };
3816f3235626SDavid Howells 
3817fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
38181da177e4SLinus Torvalds 
38191da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
38201da177e4SLinus Torvalds {
382141ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
382241ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
382341ffe5d5SHugh Dickins 	if (!info)
38241da177e4SLinus Torvalds 		return NULL;
382541ffe5d5SHugh Dickins 	return &info->vfs_inode;
38261da177e4SLinus Torvalds }
38271da177e4SLinus Torvalds 
382874b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3829fa0d7e3dSNick Piggin {
383084e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
38313ed47db3SAl Viro 		kfree(inode->i_link);
3832fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3833fa0d7e3dSNick Piggin }
3834fa0d7e3dSNick Piggin 
38351da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
38361da177e4SLinus Torvalds {
383709208d15SAl Viro 	if (S_ISREG(inode->i_mode))
38381da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
38391da177e4SLinus Torvalds }
38401da177e4SLinus Torvalds 
384141ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
38421da177e4SLinus Torvalds {
384341ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
384441ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
38451da177e4SLinus Torvalds }
38461da177e4SLinus Torvalds 
38479a8ec03eSweiping zhang static void shmem_init_inodecache(void)
38481da177e4SLinus Torvalds {
38491da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
38501da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
38515d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
38521da177e4SLinus Torvalds }
38531da177e4SLinus Torvalds 
385441ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
38551da177e4SLinus Torvalds {
38561a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
38571da177e4SLinus Torvalds }
38581da177e4SLinus Torvalds 
3859f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
38601da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
386176719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
38621da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3863800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3864800d15a5SNick Piggin 	.write_end	= shmem_write_end,
38651da177e4SLinus Torvalds #endif
38661c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3867304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
38681c93923cSAndrew Morton #endif
3869aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
38701da177e4SLinus Torvalds };
38711da177e4SLinus Torvalds 
387215ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
38731da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3874c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
38751da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3876220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
38772ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
38788174202bSAl Viro 	.write_iter	= generic_file_write_iter,
38791b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
388082c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3881f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
388283e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
38831da177e4SLinus Torvalds #endif
38841da177e4SLinus Torvalds };
38851da177e4SLinus Torvalds 
388692e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
388744a30220SYu Zhao 	.getattr	= shmem_getattr,
388894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3889b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3890b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3891feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3892b09e0fa4SEric Paris #endif
38931da177e4SLinus Torvalds };
38941da177e4SLinus Torvalds 
389592e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38961da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38971da177e4SLinus Torvalds 	.create		= shmem_create,
38981da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38991da177e4SLinus Torvalds 	.link		= shmem_link,
39001da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
39011da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
39021da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
39031da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
39041da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
39052773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
390660545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
39071da177e4SLinus Torvalds #endif
3908b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3909b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3910b09e0fa4SEric Paris #endif
391139f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
391294c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3913feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
391439f0247dSAndreas Gruenbacher #endif
391539f0247dSAndreas Gruenbacher };
391639f0247dSAndreas Gruenbacher 
391792e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3918b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3919b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3920b09e0fa4SEric Paris #endif
392139f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
392294c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3923feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
392439f0247dSAndreas Gruenbacher #endif
39251da177e4SLinus Torvalds };
39261da177e4SLinus Torvalds 
3927759b9775SHugh Dickins static const struct super_operations shmem_ops = {
39281da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
392974b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
39301da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
39311da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
39321da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3933680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
39341da177e4SLinus Torvalds #endif
39351f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
39361da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
39371da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3938396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3939779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3940779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3941779750d2SKirill A. Shutemov #endif
39421da177e4SLinus Torvalds };
39431da177e4SLinus Torvalds 
3944f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
394554cb8821SNick Piggin 	.fault		= shmem_fault,
3946d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
39471da177e4SLinus Torvalds #ifdef CONFIG_NUMA
39481da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
39491da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
39501da177e4SLinus Torvalds #endif
39511da177e4SLinus Torvalds };
39521da177e4SLinus Torvalds 
3953f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
39541da177e4SLinus Torvalds {
3955f3235626SDavid Howells 	struct shmem_options *ctx;
3956f3235626SDavid Howells 
3957f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3958f3235626SDavid Howells 	if (!ctx)
3959f3235626SDavid Howells 		return -ENOMEM;
3960f3235626SDavid Howells 
3961f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3962f3235626SDavid Howells 	ctx->uid = current_fsuid();
3963f3235626SDavid Howells 	ctx->gid = current_fsgid();
3964f3235626SDavid Howells 
3965f3235626SDavid Howells 	fc->fs_private = ctx;
3966f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3967f3235626SDavid Howells 	return 0;
39681da177e4SLinus Torvalds }
39691da177e4SLinus Torvalds 
397041ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
39711da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
39721da177e4SLinus Torvalds 	.name		= "tmpfs",
3973f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3974f3235626SDavid Howells #ifdef CONFIG_TMPFS
3975d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3976f3235626SDavid Howells #endif
39771da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
39782b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
39791da177e4SLinus Torvalds };
39801da177e4SLinus Torvalds 
398141ffe5d5SHugh Dickins int __init shmem_init(void)
39821da177e4SLinus Torvalds {
39831da177e4SLinus Torvalds 	int error;
39841da177e4SLinus Torvalds 
39859a8ec03eSweiping zhang 	shmem_init_inodecache();
39861da177e4SLinus Torvalds 
398741ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
39881da177e4SLinus Torvalds 	if (error) {
39891170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
39901da177e4SLinus Torvalds 		goto out2;
39911da177e4SLinus Torvalds 	}
399295dc112aSGreg Kroah-Hartman 
3993ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39941da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39951da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39961170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39971da177e4SLinus Torvalds 		goto out1;
39981da177e4SLinus Torvalds 	}
39995a6e75f8SKirill A. Shutemov 
4000396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4001435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
40025a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40035a6e75f8SKirill A. Shutemov 	else
40045a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
40055a6e75f8SKirill A. Shutemov #endif
40061da177e4SLinus Torvalds 	return 0;
40071da177e4SLinus Torvalds 
40081da177e4SLinus Torvalds out1:
400941ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
40101da177e4SLinus Torvalds out2:
401141ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
40121da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
40131da177e4SLinus Torvalds 	return error;
40141da177e4SLinus Torvalds }
4015853ac43aSMatt Mackall 
4016396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
40175a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
40185a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
40195a6e75f8SKirill A. Shutemov {
402026083eb6SColin Ian King 	static const int values[] = {
40215a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
40225a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
40235a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
40245a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
40255a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
40265a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
40275a6e75f8SKirill A. Shutemov 	};
40285a6e75f8SKirill A. Shutemov 	int i, count;
40295a6e75f8SKirill A. Shutemov 
40305a6e75f8SKirill A. Shutemov 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
40315a6e75f8SKirill A. Shutemov 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
40325a6e75f8SKirill A. Shutemov 
40335a6e75f8SKirill A. Shutemov 		count += sprintf(buf + count, fmt,
40345a6e75f8SKirill A. Shutemov 				shmem_format_huge(values[i]));
40355a6e75f8SKirill A. Shutemov 	}
40365a6e75f8SKirill A. Shutemov 	buf[count - 1] = '\n';
40375a6e75f8SKirill A. Shutemov 	return count;
40385a6e75f8SKirill A. Shutemov }
40395a6e75f8SKirill A. Shutemov 
40405a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
40415a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
40425a6e75f8SKirill A. Shutemov {
40435a6e75f8SKirill A. Shutemov 	char tmp[16];
40445a6e75f8SKirill A. Shutemov 	int huge;
40455a6e75f8SKirill A. Shutemov 
40465a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
40475a6e75f8SKirill A. Shutemov 		return -EINVAL;
40485a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
40495a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
40505a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
40515a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
40525a6e75f8SKirill A. Shutemov 
40535a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
40545a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
40555a6e75f8SKirill A. Shutemov 		return -EINVAL;
40565a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
40575a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
40585a6e75f8SKirill A. Shutemov 		return -EINVAL;
40595a6e75f8SKirill A. Shutemov 
40605a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
4061435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
40625a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40635a6e75f8SKirill A. Shutemov 	return count;
40645a6e75f8SKirill A. Shutemov }
40655a6e75f8SKirill A. Shutemov 
40665a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
40675a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4068396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
4069f3f0e1d2SKirill A. Shutemov 
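/*
 * The knob above appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  Example
 * interaction (illustrative, with the default build):
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	always within_size advise [never] deny force
 *	$ echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * A regular value (never/always/within_size/advise) is also copied
 * into the internal shm_mnt superblock, which is what the
 * shmem_huge > SHMEM_HUGE_DENY test selects; "deny" and "force" act
 * globally through the shmem_huge checks at fault time instead,
 * overriding every mount's huge= option.
 */
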
4070396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4071f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
4072f3f0e1d2SKirill A. Shutemov {
4073f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
4074f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4075f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
4076f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
4077f3f0e1d2SKirill A. Shutemov 
4078c0630669SYang Shi 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
4079c0630669SYang Shi 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
4080c0630669SYang Shi 		return false;
4081f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
4082f3f0e1d2SKirill A. Shutemov 		return true;
4083f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
4084f3f0e1d2SKirill A. Shutemov 		return false;
4085f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
4086f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
4087f3f0e1d2SKirill A. Shutemov 			return false;
4088f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ALWAYS:
4089f3f0e1d2SKirill A. Shutemov 			return true;
4090f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
4091f3f0e1d2SKirill A. Shutemov 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4092f3f0e1d2SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4093f3f0e1d2SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
4094f3f0e1d2SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
4095f3f0e1d2SKirill A. Shutemov 				return true;
4096e4a9bc58SJoe Perches 			fallthrough;
4097f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
4098f3f0e1d2SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
4099f3f0e1d2SKirill A. Shutemov 			return (vma->vm_flags & VM_HUGEPAGE);
4100f3f0e1d2SKirill A. Shutemov 		default:
4101f3f0e1d2SKirill A. Shutemov 			VM_BUG_ON(1);
4102f3f0e1d2SKirill A. Shutemov 			return false;
4103f3f0e1d2SKirill A. Shutemov 	}
4104f3f0e1d2SKirill A. Shutemov }
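/*
 * Worked example for SHMEM_HUGE_WITHIN_SIZE above (illustrative,
 * assuming 4K pages and 2M PMD-sized huge pages, i.e. HPAGE_PMD_NR ==
 * 512): mapping a 3M file at vm_pgoff == 0 gives off = 0, i_size
 * rounds to 3M >= HPAGE_PMD_SIZE, and i_size >> PAGE_SHIFT == 768 >=
 * off, so huge pages are allowed.  A 1M file fails the
 * i_size >= HPAGE_PMD_SIZE test and falls through to the
 * SHMEM_HUGE_ADVISE check, so it only gets huge pages if the vma has
 * VM_HUGEPAGE set.
 */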
4105396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
41065a6e75f8SKirill A. Shutemov 
4107853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4108853ac43aSMatt Mackall 
4109853ac43aSMatt Mackall /*
4110853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4111853ac43aSMatt Mackall  *
4112853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4113853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4114853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
4115853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4116853ac43aSMatt Mackall  */
4117853ac43aSMatt Mackall 
411841ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4119853ac43aSMatt Mackall 	.name		= "tmpfs",
4120f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4121d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
4122853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
41232b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4124853ac43aSMatt Mackall };
4125853ac43aSMatt Mackall 
412641ffe5d5SHugh Dickins int __init shmem_init(void)
4127853ac43aSMatt Mackall {
412841ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4129853ac43aSMatt Mackall 
413041ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4131853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4132853ac43aSMatt Mackall 
4133853ac43aSMatt Mackall 	return 0;
4134853ac43aSMatt Mackall }
4135853ac43aSMatt Mackall 
4136b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4137b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
4138853ac43aSMatt Mackall {
4139853ac43aSMatt Mackall 	return 0;
4140853ac43aSMatt Mackall }
4141853ac43aSMatt Mackall 
41423f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
41433f96b79aSHugh Dickins {
41443f96b79aSHugh Dickins 	return 0;
41453f96b79aSHugh Dickins }
41463f96b79aSHugh Dickins 
414724513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
414824513264SHugh Dickins {
414924513264SHugh Dickins }
415024513264SHugh Dickins 
4151c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4152c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4153c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4154c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4155c01d5b30SHugh Dickins {
4156c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4157c01d5b30SHugh Dickins }
4158c01d5b30SHugh Dickins #endif
4159c01d5b30SHugh Dickins 
416041ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
416194c1e62dSHugh Dickins {
416241ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
416394c1e62dSHugh Dickins }
416494c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
416594c1e62dSHugh Dickins 
4166853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
41670b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4168454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
41690b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
41700b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4171853ac43aSMatt Mackall 
4172853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4173853ac43aSMatt Mackall 
4174853ac43aSMatt Mackall /* common code */
41751da177e4SLinus Torvalds 
4176703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4177c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
41781da177e4SLinus Torvalds {
41791da177e4SLinus Torvalds 	struct inode *inode;
418093dec2daSAl Viro 	struct file *res;
41811da177e4SLinus Torvalds 
4182703321b6SMatthew Auld 	if (IS_ERR(mnt))
4183703321b6SMatthew Auld 		return ERR_CAST(mnt);
41841da177e4SLinus Torvalds 
4185285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
41861da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
41871da177e4SLinus Torvalds 
41881da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41891da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41901da177e4SLinus Torvalds 
419193dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
419293dec2daSAl Viro 				flags);
4193dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4194dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4195dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4196dac2d1f6SAl Viro 	}
4197c7277090SEric Paris 	inode->i_flags |= i_flags;
41981da177e4SLinus Torvalds 	inode->i_size = size;
41996d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
420026567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
420193dec2daSAl Viro 	if (!IS_ERR(res))
420293dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
42034b42af81SAl Viro 				&shmem_file_operations);
42046b4d0b27SAl Viro 	if (IS_ERR(res))
420593dec2daSAl Viro 		iput(inode);
42066b4d0b27SAl Viro 	return res;
42071da177e4SLinus Torvalds }
4208c7277090SEric Paris 
4209c7277090SEric Paris /**
4210c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4211c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4212c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4213e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4214e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4215c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4216c7277090SEric Paris  * @size: size to be set for the file
4217c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4218c7277090SEric Paris  */
4219c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4220c7277090SEric Paris {
4221703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4222c7277090SEric Paris }
4223c7277090SEric Paris 
4224c7277090SEric Paris /**
4225c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4226c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4227c7277090SEric Paris  * @size: size to be set for the file
4228c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4229c7277090SEric Paris  */
4230c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4231c7277090SEric Paris {
4232703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4233c7277090SEric Paris }
4234395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
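/*
 * Unlike shmem_kernel_file_setup() above, files created here do not
 * get S_PRIVATE, so normal LSM permission checks apply to the inode.
 */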
42351da177e4SLinus Torvalds 
423646711810SRandy Dunlap /**
4237703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4238703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4239703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4240703321b6SMatthew Auld  * @size: size to be set for the file
4241703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4242703321b6SMatthew Auld  */
4243703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4244703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4245703321b6SMatthew Auld {
4246703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4247703321b6SMatthew Auld }
4248703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
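/*
 * Illustrative sketch: a caller that has mounted its own private tmpfs
 * instance (as some GPU drivers do) passes that mount here instead of
 * the global shm_mnt; 'my_tmpfs_mnt' is hypothetical.
 */
static struct file *example_obj_file(struct vfsmount *my_tmpfs_mnt,
				     loff_t size)
{
	return shmem_file_setup_with_mnt(my_tmpfs_mnt, "example-obj",
					 size, VM_NORESERVE);
}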
4249703321b6SMatthew Auld 
4250703321b6SMatthew Auld /**
42511da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
425245e55300SPeter Collingbourne  * @vma: the vma to be mmapped, as prepared by do_mmap
42531da177e4SLinus Torvalds  */
42541da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
42551da177e4SLinus Torvalds {
42561da177e4SLinus Torvalds 	struct file *file;
42571da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
42581da177e4SLinus Torvalds 
425966fc1303SHugh Dickins 	/*
4260c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
426166fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
426266fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
426366fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
426466fc1303SHugh Dickins 	 */
4265703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
42661da177e4SLinus Torvalds 	if (IS_ERR(file))
42671da177e4SLinus Torvalds 		return PTR_ERR(file);
42681da177e4SLinus Torvalds 
42691da177e4SLinus Torvalds 	if (vma->vm_file)
42701da177e4SLinus Torvalds 		fput(vma->vm_file);
42711da177e4SLinus Torvalds 	vma->vm_file = file;
42721da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4273f3f0e1d2SKirill A. Shutemov 
4274396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4275f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4276f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4277f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4278f3f0e1d2SKirill A. Shutemov 	}
4279f3f0e1d2SKirill A. Shutemov 
42801da177e4SLinus Torvalds 	return 0;
42811da177e4SLinus Torvalds }
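/*
 * Illustration: this is roughly what mmap_region() does for a
 * file-less VM_SHARED mapping:
 *
 *	if (vm_flags & VM_SHARED)
 *		error = shmem_zero_setup(vma);
 *
 * after which the shared anonymous mapping is backed by an unlinked
 * tmpfs file.
 */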
4282d9d90e5eSHugh Dickins 
4283d9d90e5eSHugh Dickins /**
4284d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4285d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4286d9d90e5eSHugh Dickins  * @index:	the page index
4287d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4288d9d90e5eSHugh Dickins  *
4289d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4290d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4291d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4292d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache and needs to find those
4293d9d90e5eSHugh Dickins  * for itself; the drm drivers i915 and ttm rely upon this helper instead.
4294d9d90e5eSHugh Dickins  *
429568da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
429668da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4297d9d90e5eSHugh Dickins  */
4298d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4299d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4300d9d90e5eSHugh Dickins {
430168da9f05SHugh Dickins #ifdef CONFIG_SHMEM
430268da9f05SHugh Dickins 	struct inode *inode = mapping->host;
43039276aad6SHugh Dickins 	struct page *page;
430468da9f05SHugh Dickins 	int error;
430568da9f05SHugh Dickins 
430668da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
43079e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4308cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
430968da9f05SHugh Dickins 	if (error)
431068da9f05SHugh Dickins 		page = ERR_PTR(error);
431168da9f05SHugh Dickins 	else
431268da9f05SHugh Dickins 		unlock_page(page);
431368da9f05SHugh Dickins 	return page;
431468da9f05SHugh Dickins #else
431568da9f05SHugh Dickins 	/*
431668da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
431768da9f05SHugh Dickins 	 */
4318d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
431968da9f05SHugh Dickins #endif
4320d9d90e5eSHugh Dickins }
4321d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
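/*
 * Illustrative sketch, modelled on the i915 usage noted above: relax
 * the mapping's gfp mask so that a failed allocation returns an error
 * rather than retrying into the OOM killer.
 */
static struct page *example_try_get_page(struct address_space *mapping,
					 pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}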
4322