xref: /openbmc/linux/mm/shmem.c (revision e809d5f0b5c912fe981dce738f3283b2010665f0)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Resizable virtual memory filesystem for Linux.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright (C) 2000 Linus Torvalds.
51da177e4SLinus Torvalds  *		 2000 Transmeta Corp.
61da177e4SLinus Torvalds  *		 2000-2001 Christoph Rohland
71da177e4SLinus Torvalds  *		 2000-2001 SAP AG
81da177e4SLinus Torvalds  *		 2002 Red Hat Inc.
96922c0c7SHugh Dickins  * Copyright (C) 2002-2011 Hugh Dickins.
106922c0c7SHugh Dickins  * Copyright (C) 2011 Google Inc.
110edd73b3SHugh Dickins  * Copyright (C) 2002-2005 VERITAS Software Corporation.
121da177e4SLinus Torvalds  * Copyright (C) 2004 Andi Kleen, SuSE Labs
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Extended attribute support for tmpfs:
151da177e4SLinus Torvalds  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
161da177e4SLinus Torvalds  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
171da177e4SLinus Torvalds  *
18853ac43aSMatt Mackall  * tiny-shmem:
19853ac43aSMatt Mackall  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20853ac43aSMatt Mackall  *
211da177e4SLinus Torvalds  * This file is released under the GPL.
221da177e4SLinus Torvalds  */
231da177e4SLinus Torvalds 
24853ac43aSMatt Mackall #include <linux/fs.h>
25853ac43aSMatt Mackall #include <linux/init.h>
26853ac43aSMatt Mackall #include <linux/vfs.h>
27853ac43aSMatt Mackall #include <linux/mount.h>
28250297edSAndrew Morton #include <linux/ramfs.h>
29caefba17SHugh Dickins #include <linux/pagemap.h>
30853ac43aSMatt Mackall #include <linux/file.h>
31853ac43aSMatt Mackall #include <linux/mm.h>
3246c9a946SArnd Bergmann #include <linux/random.h>
33174cd4b1SIngo Molnar #include <linux/sched/signal.h>
34b95f1b31SPaul Gortmaker #include <linux/export.h>
35853ac43aSMatt Mackall #include <linux/swap.h>
36e2e40f2cSChristoph Hellwig #include <linux/uio.h>
37f3f0e1d2SKirill A. Shutemov #include <linux/khugepaged.h>
38749df87bSMike Kravetz #include <linux/hugetlb.h>
39b56a2d8aSVineeth Remanan Pillai #include <linux/frontswap.h>
40626c3920SAl Viro #include <linux/fs_parser.h>
41853ac43aSMatt Mackall 
4295cc09d6SAndrea Arcangeli #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
4395cc09d6SAndrea Arcangeli 
44853ac43aSMatt Mackall static struct vfsmount *shm_mnt;
45853ac43aSMatt Mackall 
46853ac43aSMatt Mackall #ifdef CONFIG_SHMEM
471da177e4SLinus Torvalds /*
481da177e4SLinus Torvalds  * This virtual memory filesystem is heavily based on the ramfs. It
491da177e4SLinus Torvalds  * extends ramfs by the ability to use swap and honor resource limits
501da177e4SLinus Torvalds  * which makes it a completely usable filesystem.
511da177e4SLinus Torvalds  */
521da177e4SLinus Torvalds 
5339f0247dSAndreas Gruenbacher #include <linux/xattr.h>
54a5694255SChristoph Hellwig #include <linux/exportfs.h>
551c7c474cSChristoph Hellwig #include <linux/posix_acl.h>
56feda821eSChristoph Hellwig #include <linux/posix_acl_xattr.h>
571da177e4SLinus Torvalds #include <linux/mman.h>
581da177e4SLinus Torvalds #include <linux/string.h>
591da177e4SLinus Torvalds #include <linux/slab.h>
601da177e4SLinus Torvalds #include <linux/backing-dev.h>
611da177e4SLinus Torvalds #include <linux/shmem_fs.h>
621da177e4SLinus Torvalds #include <linux/writeback.h>
631da177e4SLinus Torvalds #include <linux/blkdev.h>
64bda97eabSHugh Dickins #include <linux/pagevec.h>
6541ffe5d5SHugh Dickins #include <linux/percpu_counter.h>
6683e4fa9cSHugh Dickins #include <linux/falloc.h>
67708e3508SHugh Dickins #include <linux/splice.h>
681da177e4SLinus Torvalds #include <linux/security.h>
691da177e4SLinus Torvalds #include <linux/swapops.h>
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/namei.h>
72b00dc3adSHugh Dickins #include <linux/ctype.h>
73304dbdb7SLee Schermerhorn #include <linux/migrate.h>
74c1f60a5aSChristoph Lameter #include <linux/highmem.h>
75680d794bSakpm@linux-foundation.org #include <linux/seq_file.h>
7692562927SMimi Zohar #include <linux/magic.h>
779183df25SDavid Herrmann #include <linux/syscalls.h>
7840e041a2SDavid Herrmann #include <linux/fcntl.h>
799183df25SDavid Herrmann #include <uapi/linux/memfd.h>
80cfda0526SMike Rapoport #include <linux/userfaultfd_k.h>
814c27fe4cSMike Rapoport #include <linux/rmap.h>
822b4db796SAmir Goldstein #include <linux/uuid.h>
83304dbdb7SLee Schermerhorn 
847c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
851da177e4SLinus Torvalds 
86dd56b046SMel Gorman #include "internal.h"
87dd56b046SMel Gorman 
8809cbfeafSKirill A. Shutemov #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
8909cbfeafSKirill A. Shutemov #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
901da177e4SLinus Torvalds 
911da177e4SLinus Torvalds /* Pretend that each entry is of this size in directory's i_size */
921da177e4SLinus Torvalds #define BOGO_DIRENT_SIZE 20
931da177e4SLinus Torvalds 
9469f07ec9SHugh Dickins /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
9569f07ec9SHugh Dickins #define SHORT_SYMLINK_LEN 128
9669f07ec9SHugh Dickins 
971aac1400SHugh Dickins /*
98f00cdc6dSHugh Dickins  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99f00cdc6dSHugh Dickins  * inode->i_private (with i_mutex making sure that it has only one user at
100f00cdc6dSHugh Dickins  * a time): we would prefer not to enlarge the shmem inode just for that.
1011aac1400SHugh Dickins  */
1021aac1400SHugh Dickins struct shmem_falloc {
1038e205f77SHugh Dickins 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
1041aac1400SHugh Dickins 	pgoff_t start;		/* start of range currently being fallocated */
1051aac1400SHugh Dickins 	pgoff_t next;		/* the next page offset to be fallocated */
1061aac1400SHugh Dickins 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
1071aac1400SHugh Dickins 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
1081aac1400SHugh Dickins };
1091aac1400SHugh Dickins 
1100b5071ddSAl Viro struct shmem_options {
1110b5071ddSAl Viro 	unsigned long long blocks;
1120b5071ddSAl Viro 	unsigned long long inodes;
1130b5071ddSAl Viro 	struct mempolicy *mpol;
1140b5071ddSAl Viro 	kuid_t uid;
1150b5071ddSAl Viro 	kgid_t gid;
1160b5071ddSAl Viro 	umode_t mode;
1170b5071ddSAl Viro 	int huge;
1180b5071ddSAl Viro 	int seen;
1190b5071ddSAl Viro #define SHMEM_SEEN_BLOCKS 1
1200b5071ddSAl Viro #define SHMEM_SEEN_INODES 2
1210b5071ddSAl Viro #define SHMEM_SEEN_HUGE 4
1220b5071ddSAl Viro };
1230b5071ddSAl Viro 
124b76db735SAndrew Morton #ifdef CONFIG_TMPFS
125680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_blocks(void)
126680d794bSakpm@linux-foundation.org {
127ca79b0c2SArun KS 	return totalram_pages() / 2;
128680d794bSakpm@linux-foundation.org }
129680d794bSakpm@linux-foundation.org 
130680d794bSakpm@linux-foundation.org static unsigned long shmem_default_max_inodes(void)
131680d794bSakpm@linux-foundation.org {
132ca79b0c2SArun KS 	unsigned long nr_pages = totalram_pages();
133ca79b0c2SArun KS 
134ca79b0c2SArun KS 	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
135680d794bSakpm@linux-foundation.org }
136b76db735SAndrew Morton #endif
137680d794bSakpm@linux-foundation.org 
138bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
139bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
140bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index);
141c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
142c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
143c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
144c5bf121eSVineeth Remanan Pillai 			     vm_fault_t *fault_type);
14568da9f05SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1469e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp,
147cfda0526SMike Rapoport 		gfp_t gfp, struct vm_area_struct *vma,
1482b740303SSouptick Joarder 		struct vm_fault *vmf, vm_fault_t *fault_type);
14968da9f05SHugh Dickins 
150f3f0e1d2SKirill A. Shutemov int shmem_getpage(struct inode *inode, pgoff_t index,
1519e18eb29SAndres Lagar-Cavilla 		struct page **pagep, enum sgp_type sgp)
15268da9f05SHugh Dickins {
15368da9f05SHugh Dickins 	return shmem_getpage_gfp(inode, index, pagep, sgp,
154cfda0526SMike Rapoport 		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
15568da9f05SHugh Dickins }
1561da177e4SLinus Torvalds 
1571da177e4SLinus Torvalds static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
1581da177e4SLinus Torvalds {
1591da177e4SLinus Torvalds 	return sb->s_fs_info;
1601da177e4SLinus Torvalds }
1611da177e4SLinus Torvalds 
1621da177e4SLinus Torvalds /*
1631da177e4SLinus Torvalds  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
1641da177e4SLinus Torvalds  * for shared memory and for shared anonymous (/dev/zero) mappings
1651da177e4SLinus Torvalds  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
1661da177e4SLinus Torvalds  * consistent with the pre-accounting of private mappings ...
1671da177e4SLinus Torvalds  */
1681da177e4SLinus Torvalds static inline int shmem_acct_size(unsigned long flags, loff_t size)
1691da177e4SLinus Torvalds {
1700b0a0806SHugh Dickins 	return (flags & VM_NORESERVE) ?
171191c5424SAl Viro 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
1721da177e4SLinus Torvalds }
1731da177e4SLinus Torvalds 
1741da177e4SLinus Torvalds static inline void shmem_unacct_size(unsigned long flags, loff_t size)
1751da177e4SLinus Torvalds {
1760b0a0806SHugh Dickins 	if (!(flags & VM_NORESERVE))
1771da177e4SLinus Torvalds 		vm_unacct_memory(VM_ACCT(size));
1781da177e4SLinus Torvalds }
1791da177e4SLinus Torvalds 
18077142517SKonstantin Khlebnikov static inline int shmem_reacct_size(unsigned long flags,
18177142517SKonstantin Khlebnikov 		loff_t oldsize, loff_t newsize)
18277142517SKonstantin Khlebnikov {
18377142517SKonstantin Khlebnikov 	if (!(flags & VM_NORESERVE)) {
18477142517SKonstantin Khlebnikov 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
18577142517SKonstantin Khlebnikov 			return security_vm_enough_memory_mm(current->mm,
18677142517SKonstantin Khlebnikov 					VM_ACCT(newsize) - VM_ACCT(oldsize));
18777142517SKonstantin Khlebnikov 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
18877142517SKonstantin Khlebnikov 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
18977142517SKonstantin Khlebnikov 	}
19077142517SKonstantin Khlebnikov 	return 0;
19177142517SKonstantin Khlebnikov }
19277142517SKonstantin Khlebnikov 
1931da177e4SLinus Torvalds /*
1941da177e4SLinus Torvalds  * ... whereas tmpfs objects are accounted incrementally as
19575edd345SHugh Dickins  * pages are allocated, in order to allow large sparse files.
1961da177e4SLinus Torvalds  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
1971da177e4SLinus Torvalds  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
1981da177e4SLinus Torvalds  */
199800d8c63SKirill A. Shutemov static inline int shmem_acct_block(unsigned long flags, long pages)
2001da177e4SLinus Torvalds {
201800d8c63SKirill A. Shutemov 	if (!(flags & VM_NORESERVE))
202800d8c63SKirill A. Shutemov 		return 0;
203800d8c63SKirill A. Shutemov 
204800d8c63SKirill A. Shutemov 	return security_vm_enough_memory_mm(current->mm,
205800d8c63SKirill A. Shutemov 			pages * VM_ACCT(PAGE_SIZE));
2061da177e4SLinus Torvalds }
2071da177e4SLinus Torvalds 
2081da177e4SLinus Torvalds static inline void shmem_unacct_blocks(unsigned long flags, long pages)
2091da177e4SLinus Torvalds {
2100b0a0806SHugh Dickins 	if (flags & VM_NORESERVE)
21109cbfeafSKirill A. Shutemov 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
2121da177e4SLinus Torvalds }
2131da177e4SLinus Torvalds 
2140f079694SMike Rapoport static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
2150f079694SMike Rapoport {
2160f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2170f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2180f079694SMike Rapoport 
2190f079694SMike Rapoport 	if (shmem_acct_block(info->flags, pages))
2200f079694SMike Rapoport 		return false;
2210f079694SMike Rapoport 
2220f079694SMike Rapoport 	if (sbinfo->max_blocks) {
2230f079694SMike Rapoport 		if (percpu_counter_compare(&sbinfo->used_blocks,
2240f079694SMike Rapoport 					   sbinfo->max_blocks - pages) > 0)
2250f079694SMike Rapoport 			goto unacct;
2260f079694SMike Rapoport 		percpu_counter_add(&sbinfo->used_blocks, pages);
2270f079694SMike Rapoport 	}
2280f079694SMike Rapoport 
2290f079694SMike Rapoport 	return true;
2300f079694SMike Rapoport 
2310f079694SMike Rapoport unacct:
2320f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2330f079694SMike Rapoport 	return false;
2340f079694SMike Rapoport }
2350f079694SMike Rapoport 
2360f079694SMike Rapoport static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
2370f079694SMike Rapoport {
2380f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
2390f079694SMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2400f079694SMike Rapoport 
2410f079694SMike Rapoport 	if (sbinfo->max_blocks)
2420f079694SMike Rapoport 		percpu_counter_sub(&sbinfo->used_blocks, pages);
2430f079694SMike Rapoport 	shmem_unacct_blocks(info->flags, pages);
2440f079694SMike Rapoport }
2450f079694SMike Rapoport 
246759b9775SHugh Dickins static const struct super_operations shmem_ops;
247f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops;
24815ad7cdcSHelge Deller static const struct file_operations shmem_file_operations;
24992e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations;
25092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations;
25192e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations;
252f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops;
253779750d2SKirill A. Shutemov static struct file_system_type shmem_fs_type;
2541da177e4SLinus Torvalds 
255b0506e48SMike Rapoport bool vma_is_shmem(struct vm_area_struct *vma)
256b0506e48SMike Rapoport {
257b0506e48SMike Rapoport 	return vma->vm_ops == &shmem_vm_ops;
258b0506e48SMike Rapoport }
259b0506e48SMike Rapoport 
2601da177e4SLinus Torvalds static LIST_HEAD(shmem_swaplist);
261cb5f7b9aSHugh Dickins static DEFINE_MUTEX(shmem_swaplist_mutex);
2621da177e4SLinus Torvalds 
263*e809d5f0SChris Down /*
264*e809d5f0SChris Down  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
265*e809d5f0SChris Down  * produces a novel ino for the newly allocated inode.
266*e809d5f0SChris Down  *
267*e809d5f0SChris Down  * It may also be called when making a hard link to permit the space needed by
268*e809d5f0SChris Down  * each dentry. However, in that case, no new inode number is needed since that
269*e809d5f0SChris Down  * internally draws from another pool of inode numbers (currently global
270*e809d5f0SChris Down  * get_next_ino()). This case is indicated by passing NULL as inop.
271*e809d5f0SChris Down  */
272*e809d5f0SChris Down #define SHMEM_INO_BATCH 1024
273*e809d5f0SChris Down static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
2745b04c689SPavel Emelyanov {
2755b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
276*e809d5f0SChris Down 	ino_t ino;
277*e809d5f0SChris Down 
278*e809d5f0SChris Down 	if (!(sb->s_flags & SB_KERNMOUNT)) {
2795b04c689SPavel Emelyanov 		spin_lock(&sbinfo->stat_lock);
2805b04c689SPavel Emelyanov 		if (!sbinfo->free_inodes) {
2815b04c689SPavel Emelyanov 			spin_unlock(&sbinfo->stat_lock);
2825b04c689SPavel Emelyanov 			return -ENOSPC;
2835b04c689SPavel Emelyanov 		}
2845b04c689SPavel Emelyanov 		sbinfo->free_inodes--;
285*e809d5f0SChris Down 		if (inop) {
286*e809d5f0SChris Down 			ino = sbinfo->next_ino++;
287*e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
288*e809d5f0SChris Down 				ino = sbinfo->next_ino++;
289*e809d5f0SChris Down 			if (unlikely(ino > UINT_MAX)) {
290*e809d5f0SChris Down 				/*
291*e809d5f0SChris Down 				 * Emulate get_next_ino uint wraparound for
292*e809d5f0SChris Down 				 * compatibility
293*e809d5f0SChris Down 				 */
294*e809d5f0SChris Down 				ino = 1;
2955b04c689SPavel Emelyanov 			}
296*e809d5f0SChris Down 			*inop = ino;
297*e809d5f0SChris Down 		}
298*e809d5f0SChris Down 		spin_unlock(&sbinfo->stat_lock);
299*e809d5f0SChris Down 	} else if (inop) {
300*e809d5f0SChris Down 		/*
301*e809d5f0SChris Down 		 * __shmem_file_setup, one of our callers, is lock-free: it
302*e809d5f0SChris Down 		 * doesn't hold stat_lock in shmem_reserve_inode since
303*e809d5f0SChris Down 		 * max_inodes is always 0, and is called from potentially
304*e809d5f0SChris Down 		 * unknown contexts. As such, use a per-cpu batched allocator
305*e809d5f0SChris Down 		 * which doesn't require the per-sb stat_lock unless we are at
306*e809d5f0SChris Down 		 * the batch boundary.
307*e809d5f0SChris Down 		 */
308*e809d5f0SChris Down 		ino_t *next_ino;
309*e809d5f0SChris Down 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
310*e809d5f0SChris Down 		ino = *next_ino;
311*e809d5f0SChris Down 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
312*e809d5f0SChris Down 			spin_lock(&sbinfo->stat_lock);
313*e809d5f0SChris Down 			ino = sbinfo->next_ino;
314*e809d5f0SChris Down 			sbinfo->next_ino += SHMEM_INO_BATCH;
315*e809d5f0SChris Down 			spin_unlock(&sbinfo->stat_lock);
316*e809d5f0SChris Down 			if (unlikely(is_zero_ino(ino)))
317*e809d5f0SChris Down 				ino++;
318*e809d5f0SChris Down 		}
319*e809d5f0SChris Down 		*inop = ino;
320*e809d5f0SChris Down 		*next_ino = ++ino;
321*e809d5f0SChris Down 		put_cpu();
322*e809d5f0SChris Down 	}
323*e809d5f0SChris Down 
3245b04c689SPavel Emelyanov 	return 0;
3255b04c689SPavel Emelyanov }
3265b04c689SPavel Emelyanov 
3275b04c689SPavel Emelyanov static void shmem_free_inode(struct super_block *sb)
3285b04c689SPavel Emelyanov {
3295b04c689SPavel Emelyanov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3305b04c689SPavel Emelyanov 	if (sbinfo->max_inodes) {
3315b04c689SPavel Emelyanov 		spin_lock(&sbinfo->stat_lock);
3325b04c689SPavel Emelyanov 		sbinfo->free_inodes++;
3335b04c689SPavel Emelyanov 		spin_unlock(&sbinfo->stat_lock);
3345b04c689SPavel Emelyanov 	}
3355b04c689SPavel Emelyanov }
3365b04c689SPavel Emelyanov 
33746711810SRandy Dunlap /**
33841ffe5d5SHugh Dickins  * shmem_recalc_inode - recalculate the block usage of an inode
3391da177e4SLinus Torvalds  * @inode: inode to recalc
3401da177e4SLinus Torvalds  *
3411da177e4SLinus Torvalds  * We have to calculate the free blocks since the mm can drop
3421da177e4SLinus Torvalds  * undirtied hole pages behind our back.
3431da177e4SLinus Torvalds  *
3441da177e4SLinus Torvalds  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
3451da177e4SLinus Torvalds  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
3461da177e4SLinus Torvalds  *
3471da177e4SLinus Torvalds  * It has to be called with the spinlock held.
3481da177e4SLinus Torvalds  */
3491da177e4SLinus Torvalds static void shmem_recalc_inode(struct inode *inode)
3501da177e4SLinus Torvalds {
3511da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
3521da177e4SLinus Torvalds 	long freed;
3531da177e4SLinus Torvalds 
3541da177e4SLinus Torvalds 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
3551da177e4SLinus Torvalds 	if (freed > 0) {
3561da177e4SLinus Torvalds 		info->alloced -= freed;
35754af6042SHugh Dickins 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
3580f079694SMike Rapoport 		shmem_inode_unacct_blocks(inode, freed);
3591da177e4SLinus Torvalds 	}
3601da177e4SLinus Torvalds }
3611da177e4SLinus Torvalds 
362800d8c63SKirill A. Shutemov bool shmem_charge(struct inode *inode, long pages)
363800d8c63SKirill A. Shutemov {
364800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3654595ef88SKirill A. Shutemov 	unsigned long flags;
366800d8c63SKirill A. Shutemov 
3670f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, pages))
368800d8c63SKirill A. Shutemov 		return false;
369b1cc94abSMike Rapoport 
370aaa52e34SHugh Dickins 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
371aaa52e34SHugh Dickins 	inode->i_mapping->nrpages += pages;
372aaa52e34SHugh Dickins 
3734595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
374800d8c63SKirill A. Shutemov 	info->alloced += pages;
375800d8c63SKirill A. Shutemov 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
376800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
3774595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
378800d8c63SKirill A. Shutemov 
379800d8c63SKirill A. Shutemov 	return true;
380800d8c63SKirill A. Shutemov }
381800d8c63SKirill A. Shutemov 
382800d8c63SKirill A. Shutemov void shmem_uncharge(struct inode *inode, long pages)
383800d8c63SKirill A. Shutemov {
384800d8c63SKirill A. Shutemov 	struct shmem_inode_info *info = SHMEM_I(inode);
3854595ef88SKirill A. Shutemov 	unsigned long flags;
386800d8c63SKirill A. Shutemov 
387aaa52e34SHugh Dickins 	/* nrpages adjustment done by __delete_from_page_cache() or caller */
388aaa52e34SHugh Dickins 
3894595ef88SKirill A. Shutemov 	spin_lock_irqsave(&info->lock, flags);
390800d8c63SKirill A. Shutemov 	info->alloced -= pages;
391800d8c63SKirill A. Shutemov 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
392800d8c63SKirill A. Shutemov 	shmem_recalc_inode(inode);
3934595ef88SKirill A. Shutemov 	spin_unlock_irqrestore(&info->lock, flags);
394800d8c63SKirill A. Shutemov 
3950f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, pages);
396800d8c63SKirill A. Shutemov }
397800d8c63SKirill A. Shutemov 
3987a5d0fbbSHugh Dickins /*
39962f945b6SMatthew Wilcox  * Replace item expected in xarray by a new item, while holding xa_lock.
4007a5d0fbbSHugh Dickins  */
40162f945b6SMatthew Wilcox static int shmem_replace_entry(struct address_space *mapping,
4027a5d0fbbSHugh Dickins 			pgoff_t index, void *expected, void *replacement)
4037a5d0fbbSHugh Dickins {
40462f945b6SMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, index);
4056dbaf22cSJohannes Weiner 	void *item;
4067a5d0fbbSHugh Dickins 
4077a5d0fbbSHugh Dickins 	VM_BUG_ON(!expected);
4086dbaf22cSJohannes Weiner 	VM_BUG_ON(!replacement);
40962f945b6SMatthew Wilcox 	item = xas_load(&xas);
4107a5d0fbbSHugh Dickins 	if (item != expected)
4117a5d0fbbSHugh Dickins 		return -ENOENT;
41262f945b6SMatthew Wilcox 	xas_store(&xas, replacement);
4137a5d0fbbSHugh Dickins 	return 0;
4147a5d0fbbSHugh Dickins }
4157a5d0fbbSHugh Dickins 
4167a5d0fbbSHugh Dickins /*
417d1899228SHugh Dickins  * Sometimes, before we decide whether to proceed or to fail, we must check
418d1899228SHugh Dickins  * that an entry was not already brought back from swap by a racing thread.
419d1899228SHugh Dickins  *
420d1899228SHugh Dickins  * Checking page is not enough: by the time a SwapCache page is locked, it
421d1899228SHugh Dickins  * might be reused, and again be SwapCache, using the same swap as before.
422d1899228SHugh Dickins  */
423d1899228SHugh Dickins static bool shmem_confirm_swap(struct address_space *mapping,
424d1899228SHugh Dickins 			       pgoff_t index, swp_entry_t swap)
425d1899228SHugh Dickins {
426a12831bfSMatthew Wilcox 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
427d1899228SHugh Dickins }
428d1899228SHugh Dickins 
429d1899228SHugh Dickins /*
4305a6e75f8SKirill A. Shutemov  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
4315a6e75f8SKirill A. Shutemov  *
4325a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_NEVER:
4335a6e75f8SKirill A. Shutemov  *	disables huge pages for the mount;
4345a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ALWAYS:
4355a6e75f8SKirill A. Shutemov  *	enables huge pages for the mount;
4365a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_WITHIN_SIZE:
4375a6e75f8SKirill A. Shutemov  *	only allocate huge pages if the page will be fully within i_size,
4385a6e75f8SKirill A. Shutemov  *	also respect fadvise()/madvise() hints;
4395a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_ADVISE:
4405a6e75f8SKirill A. Shutemov  *	only allocate huge pages if requested with fadvise()/madvise();
4415a6e75f8SKirill A. Shutemov  */
4425a6e75f8SKirill A. Shutemov 
4435a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_NEVER	0
4445a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ALWAYS	1
4455a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_WITHIN_SIZE	2
4465a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_ADVISE	3
4475a6e75f8SKirill A. Shutemov 
4485a6e75f8SKirill A. Shutemov /*
4495a6e75f8SKirill A. Shutemov  * Special values.
4505a6e75f8SKirill A. Shutemov  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
4515a6e75f8SKirill A. Shutemov  *
4525a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_DENY:
4535a6e75f8SKirill A. Shutemov  *	disables huge on shm_mnt and all mounts, for emergency use;
4545a6e75f8SKirill A. Shutemov  * SHMEM_HUGE_FORCE:
4555a6e75f8SKirill A. Shutemov  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
4565a6e75f8SKirill A. Shutemov  *
4575a6e75f8SKirill A. Shutemov  */
4585a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_DENY		(-1)
4595a6e75f8SKirill A. Shutemov #define SHMEM_HUGE_FORCE	(-2)
4605a6e75f8SKirill A. Shutemov 
461396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4625a6e75f8SKirill A. Shutemov /* ifdef here to avoid bloating shmem.o when not necessary */
4635a6e75f8SKirill A. Shutemov 
4645b9c98f3SMike Kravetz static int shmem_huge __read_mostly;
4655a6e75f8SKirill A. Shutemov 
466e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS)
4675a6e75f8SKirill A. Shutemov static int shmem_parse_huge(const char *str)
4685a6e75f8SKirill A. Shutemov {
4695a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "never"))
4705a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_NEVER;
4715a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "always"))
4725a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ALWAYS;
4735a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "within_size"))
4745a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_WITHIN_SIZE;
4755a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "advise"))
4765a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_ADVISE;
4775a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "deny"))
4785a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_DENY;
4795a6e75f8SKirill A. Shutemov 	if (!strcmp(str, "force"))
4805a6e75f8SKirill A. Shutemov 		return SHMEM_HUGE_FORCE;
4815a6e75f8SKirill A. Shutemov 	return -EINVAL;
4825a6e75f8SKirill A. Shutemov }
483e5f2249aSArnd Bergmann #endif
4845a6e75f8SKirill A. Shutemov 
485e5f2249aSArnd Bergmann #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
4865a6e75f8SKirill A. Shutemov static const char *shmem_format_huge(int huge)
4875a6e75f8SKirill A. Shutemov {
4885a6e75f8SKirill A. Shutemov 	switch (huge) {
4895a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
4905a6e75f8SKirill A. Shutemov 		return "never";
4915a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ALWAYS:
4925a6e75f8SKirill A. Shutemov 		return "always";
4935a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_WITHIN_SIZE:
4945a6e75f8SKirill A. Shutemov 		return "within_size";
4955a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
4965a6e75f8SKirill A. Shutemov 		return "advise";
4975a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_DENY:
4985a6e75f8SKirill A. Shutemov 		return "deny";
4995a6e75f8SKirill A. Shutemov 	case SHMEM_HUGE_FORCE:
5005a6e75f8SKirill A. Shutemov 		return "force";
5015a6e75f8SKirill A. Shutemov 	default:
5025a6e75f8SKirill A. Shutemov 		VM_BUG_ON(1);
5035a6e75f8SKirill A. Shutemov 		return "bad_val";
5045a6e75f8SKirill A. Shutemov 	}
5055a6e75f8SKirill A. Shutemov }
506f1f5929cSJérémy Lefaure #endif
5075a6e75f8SKirill A. Shutemov 
508779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
509779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
510779750d2SKirill A. Shutemov {
511779750d2SKirill A. Shutemov 	LIST_HEAD(list), *pos, *next;
512253fd0f0SKirill A. Shutemov 	LIST_HEAD(to_remove);
513779750d2SKirill A. Shutemov 	struct inode *inode;
514779750d2SKirill A. Shutemov 	struct shmem_inode_info *info;
515779750d2SKirill A. Shutemov 	struct page *page;
516779750d2SKirill A. Shutemov 	unsigned long batch = sc ? sc->nr_to_scan : 128;
517779750d2SKirill A. Shutemov 	int removed = 0, split = 0;
518779750d2SKirill A. Shutemov 
519779750d2SKirill A. Shutemov 	if (list_empty(&sbinfo->shrinklist))
520779750d2SKirill A. Shutemov 		return SHRINK_STOP;
521779750d2SKirill A. Shutemov 
522779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
523779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
524779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
525779750d2SKirill A. Shutemov 
526779750d2SKirill A. Shutemov 		/* pin the inode */
527779750d2SKirill A. Shutemov 		inode = igrab(&info->vfs_inode);
528779750d2SKirill A. Shutemov 
529779750d2SKirill A. Shutemov 		/* inode is about to be evicted */
530779750d2SKirill A. Shutemov 		if (!inode) {
531779750d2SKirill A. Shutemov 			list_del_init(&info->shrinklist);
532779750d2SKirill A. Shutemov 			removed++;
533779750d2SKirill A. Shutemov 			goto next;
534779750d2SKirill A. Shutemov 		}
535779750d2SKirill A. Shutemov 
536779750d2SKirill A. Shutemov 		/* Check if there's anything to gain */
537779750d2SKirill A. Shutemov 		if (round_up(inode->i_size, PAGE_SIZE) ==
538779750d2SKirill A. Shutemov 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
539253fd0f0SKirill A. Shutemov 			list_move(&info->shrinklist, &to_remove);
540779750d2SKirill A. Shutemov 			removed++;
541779750d2SKirill A. Shutemov 			goto next;
542779750d2SKirill A. Shutemov 		}
543779750d2SKirill A. Shutemov 
544779750d2SKirill A. Shutemov 		list_move(&info->shrinklist, &list);
545779750d2SKirill A. Shutemov next:
546779750d2SKirill A. Shutemov 		if (!--batch)
547779750d2SKirill A. Shutemov 			break;
548779750d2SKirill A. Shutemov 	}
549779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
550779750d2SKirill A. Shutemov 
551253fd0f0SKirill A. Shutemov 	list_for_each_safe(pos, next, &to_remove) {
552253fd0f0SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
553253fd0f0SKirill A. Shutemov 		inode = &info->vfs_inode;
554253fd0f0SKirill A. Shutemov 		list_del_init(&info->shrinklist);
555253fd0f0SKirill A. Shutemov 		iput(inode);
556253fd0f0SKirill A. Shutemov 	}
557253fd0f0SKirill A. Shutemov 
558779750d2SKirill A. Shutemov 	list_for_each_safe(pos, next, &list) {
559779750d2SKirill A. Shutemov 		int ret;
560779750d2SKirill A. Shutemov 
561779750d2SKirill A. Shutemov 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
562779750d2SKirill A. Shutemov 		inode = &info->vfs_inode;
563779750d2SKirill A. Shutemov 
564b3cd54b2SKirill A. Shutemov 		if (nr_to_split && split >= nr_to_split)
565b3cd54b2SKirill A. Shutemov 			goto leave;
566779750d2SKirill A. Shutemov 
567b3cd54b2SKirill A. Shutemov 		page = find_get_page(inode->i_mapping,
568779750d2SKirill A. Shutemov 				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
569779750d2SKirill A. Shutemov 		if (!page)
570779750d2SKirill A. Shutemov 			goto drop;
571779750d2SKirill A. Shutemov 
572b3cd54b2SKirill A. Shutemov 		/* No huge page at the end of the file: nothing to split */
573779750d2SKirill A. Shutemov 		if (!PageTransHuge(page)) {
574779750d2SKirill A. Shutemov 			put_page(page);
575779750d2SKirill A. Shutemov 			goto drop;
576779750d2SKirill A. Shutemov 		}
577779750d2SKirill A. Shutemov 
578b3cd54b2SKirill A. Shutemov 		/*
579b3cd54b2SKirill A. Shutemov 		 * Leave the inode on the list if we failed to lock
580b3cd54b2SKirill A. Shutemov 		 * the page at this time.
581b3cd54b2SKirill A. Shutemov 		 *
582b3cd54b2SKirill A. Shutemov 		 * Waiting for the lock may lead to deadlock in the
583b3cd54b2SKirill A. Shutemov 		 * reclaim path.
584b3cd54b2SKirill A. Shutemov 		 */
585b3cd54b2SKirill A. Shutemov 		if (!trylock_page(page)) {
586b3cd54b2SKirill A. Shutemov 			put_page(page);
587b3cd54b2SKirill A. Shutemov 			goto leave;
588b3cd54b2SKirill A. Shutemov 		}
589b3cd54b2SKirill A. Shutemov 
590779750d2SKirill A. Shutemov 		ret = split_huge_page(page);
591779750d2SKirill A. Shutemov 		unlock_page(page);
592779750d2SKirill A. Shutemov 		put_page(page);
593779750d2SKirill A. Shutemov 
594b3cd54b2SKirill A. Shutemov 		/* If split failed leave the inode on the list */
595b3cd54b2SKirill A. Shutemov 		if (ret)
596b3cd54b2SKirill A. Shutemov 			goto leave;
597779750d2SKirill A. Shutemov 
598779750d2SKirill A. Shutemov 		split++;
599779750d2SKirill A. Shutemov drop:
600779750d2SKirill A. Shutemov 		list_del_init(&info->shrinklist);
601779750d2SKirill A. Shutemov 		removed++;
602b3cd54b2SKirill A. Shutemov leave:
603779750d2SKirill A. Shutemov 		iput(inode);
604779750d2SKirill A. Shutemov 	}
605779750d2SKirill A. Shutemov 
606779750d2SKirill A. Shutemov 	spin_lock(&sbinfo->shrinklist_lock);
607779750d2SKirill A. Shutemov 	list_splice_tail(&list, &sbinfo->shrinklist);
608779750d2SKirill A. Shutemov 	sbinfo->shrinklist_len -= removed;
609779750d2SKirill A. Shutemov 	spin_unlock(&sbinfo->shrinklist_lock);
610779750d2SKirill A. Shutemov 
611779750d2SKirill A. Shutemov 	return split;
612779750d2SKirill A. Shutemov }
613779750d2SKirill A. Shutemov 
614779750d2SKirill A. Shutemov static long shmem_unused_huge_scan(struct super_block *sb,
615779750d2SKirill A. Shutemov 		struct shrink_control *sc)
616779750d2SKirill A. Shutemov {
617779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
618779750d2SKirill A. Shutemov 
619779750d2SKirill A. Shutemov 	if (!READ_ONCE(sbinfo->shrinklist_len))
620779750d2SKirill A. Shutemov 		return SHRINK_STOP;
621779750d2SKirill A. Shutemov 
622779750d2SKirill A. Shutemov 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
623779750d2SKirill A. Shutemov }
624779750d2SKirill A. Shutemov 
625779750d2SKirill A. Shutemov static long shmem_unused_huge_count(struct super_block *sb,
626779750d2SKirill A. Shutemov 		struct shrink_control *sc)
627779750d2SKirill A. Shutemov {
628779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
629779750d2SKirill A. Shutemov 	return READ_ONCE(sbinfo->shrinklist_len);
630779750d2SKirill A. Shutemov }
631396bcc52SMatthew Wilcox (Oracle) #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
6325a6e75f8SKirill A. Shutemov 
6335a6e75f8SKirill A. Shutemov #define shmem_huge SHMEM_HUGE_DENY
6345a6e75f8SKirill A. Shutemov 
635779750d2SKirill A. Shutemov static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
636779750d2SKirill A. Shutemov 		struct shrink_control *sc, unsigned long nr_to_split)
637779750d2SKirill A. Shutemov {
638779750d2SKirill A. Shutemov 	return 0;
639779750d2SKirill A. Shutemov }
640396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
6415a6e75f8SKirill A. Shutemov 
64289fdcd26SYang Shi static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
64389fdcd26SYang Shi {
644396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
64589fdcd26SYang Shi 	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
64689fdcd26SYang Shi 	    shmem_huge != SHMEM_HUGE_DENY)
64789fdcd26SYang Shi 		return true;
64889fdcd26SYang Shi 	return false;
64989fdcd26SYang Shi }
65089fdcd26SYang Shi 
6515a6e75f8SKirill A. Shutemov /*
65246f65ec1SHugh Dickins  * Like add_to_page_cache_locked, but error if expected item has gone.
65346f65ec1SHugh Dickins  */
65446f65ec1SHugh Dickins static int shmem_add_to_page_cache(struct page *page,
65546f65ec1SHugh Dickins 				   struct address_space *mapping,
6563fea5a49SJohannes Weiner 				   pgoff_t index, void *expected, gfp_t gfp,
6573fea5a49SJohannes Weiner 				   struct mm_struct *charge_mm)
65846f65ec1SHugh Dickins {
659552446a4SMatthew Wilcox 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
660552446a4SMatthew Wilcox 	unsigned long i = 0;
661d8c6546bSMatthew Wilcox (Oracle) 	unsigned long nr = compound_nr(page);
6623fea5a49SJohannes Weiner 	int error;
66346f65ec1SHugh Dickins 
664800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageTail(page), page);
665800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
666309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageLocked(page), page);
667309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
668800d8c63SKirill A. Shutemov 	VM_BUG_ON(expected && PageTransHuge(page));
66946f65ec1SHugh Dickins 
670800d8c63SKirill A. Shutemov 	page_ref_add(page, nr);
67146f65ec1SHugh Dickins 	page->mapping = mapping;
67246f65ec1SHugh Dickins 	page->index = index;
67346f65ec1SHugh Dickins 
6744c6355b2SJohannes Weiner 	if (!PageSwapCache(page)) {
675d9eb1ea2SJohannes Weiner 		error = mem_cgroup_charge(page, charge_mm, gfp);
6763fea5a49SJohannes Weiner 		if (error) {
6774c6355b2SJohannes Weiner 			if (PageTransHuge(page)) {
6783fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK);
6793fea5a49SJohannes Weiner 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
6803fea5a49SJohannes Weiner 			}
6813fea5a49SJohannes Weiner 			goto error;
6823fea5a49SJohannes Weiner 		}
6834c6355b2SJohannes Weiner 	}
6843fea5a49SJohannes Weiner 	cgroup_throttle_swaprate(page, gfp);
6853fea5a49SJohannes Weiner 
686552446a4SMatthew Wilcox 	do {
687552446a4SMatthew Wilcox 		void *entry;
688552446a4SMatthew Wilcox 		xas_lock_irq(&xas);
689552446a4SMatthew Wilcox 		entry = xas_find_conflict(&xas);
690552446a4SMatthew Wilcox 		if (entry != expected)
691552446a4SMatthew Wilcox 			xas_set_err(&xas, -EEXIST);
692552446a4SMatthew Wilcox 		xas_create_range(&xas);
693552446a4SMatthew Wilcox 		if (xas_error(&xas))
694552446a4SMatthew Wilcox 			goto unlock;
695552446a4SMatthew Wilcox next:
6964101196bSMatthew Wilcox (Oracle) 		xas_store(&xas, page);
697552446a4SMatthew Wilcox 		if (++i < nr) {
698552446a4SMatthew Wilcox 			xas_next(&xas);
699552446a4SMatthew Wilcox 			goto next;
700552446a4SMatthew Wilcox 		}
701800d8c63SKirill A. Shutemov 		if (PageTransHuge(page)) {
702800d8c63SKirill A. Shutemov 			count_vm_event(THP_FILE_ALLOC);
70311fb9989SMel Gorman 			__inc_node_page_state(page, NR_SHMEM_THPS);
704552446a4SMatthew Wilcox 		}
705552446a4SMatthew Wilcox 		mapping->nrpages += nr;
7060d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
7070d1c2072SJohannes Weiner 		__mod_lruvec_page_state(page, NR_SHMEM, nr);
708552446a4SMatthew Wilcox unlock:
709552446a4SMatthew Wilcox 		xas_unlock_irq(&xas);
710552446a4SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
711552446a4SMatthew Wilcox 
712552446a4SMatthew Wilcox 	if (xas_error(&xas)) {
7133fea5a49SJohannes Weiner 		error = xas_error(&xas);
7143fea5a49SJohannes Weiner 		goto error;
71546f65ec1SHugh Dickins 	}
716552446a4SMatthew Wilcox 
717552446a4SMatthew Wilcox 	return 0;
7183fea5a49SJohannes Weiner error:
7193fea5a49SJohannes Weiner 	page->mapping = NULL;
7203fea5a49SJohannes Weiner 	page_ref_sub(page, nr);
7213fea5a49SJohannes Weiner 	return error;
72246f65ec1SHugh Dickins }
72346f65ec1SHugh Dickins 
72446f65ec1SHugh Dickins /*
7256922c0c7SHugh Dickins  * Like delete_from_page_cache, but substitutes swap for page.
7266922c0c7SHugh Dickins  */
7276922c0c7SHugh Dickins static void shmem_delete_from_page_cache(struct page *page, void *radswap)
7286922c0c7SHugh Dickins {
7296922c0c7SHugh Dickins 	struct address_space *mapping = page->mapping;
7306922c0c7SHugh Dickins 	int error;
7316922c0c7SHugh Dickins 
732800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
733800d8c63SKirill A. Shutemov 
734b93b0163SMatthew Wilcox 	xa_lock_irq(&mapping->i_pages);
73562f945b6SMatthew Wilcox 	error = shmem_replace_entry(mapping, page->index, page, radswap);
7366922c0c7SHugh Dickins 	page->mapping = NULL;
7376922c0c7SHugh Dickins 	mapping->nrpages--;
7380d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_FILE_PAGES);
7390d1c2072SJohannes Weiner 	__dec_lruvec_page_state(page, NR_SHMEM);
740b93b0163SMatthew Wilcox 	xa_unlock_irq(&mapping->i_pages);
74109cbfeafSKirill A. Shutemov 	put_page(page);
7426922c0c7SHugh Dickins 	BUG_ON(error);
7436922c0c7SHugh Dickins }
7446922c0c7SHugh Dickins 
7456922c0c7SHugh Dickins /*
746c121d3bbSMatthew Wilcox  * Remove swap entry from page cache, free the swap and its page cache.
7477a5d0fbbSHugh Dickins  */
7487a5d0fbbSHugh Dickins static int shmem_free_swap(struct address_space *mapping,
7497a5d0fbbSHugh Dickins 			   pgoff_t index, void *radswap)
7507a5d0fbbSHugh Dickins {
7516dbaf22cSJohannes Weiner 	void *old;
7527a5d0fbbSHugh Dickins 
75355f3f7eaSMatthew Wilcox 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
7546dbaf22cSJohannes Weiner 	if (old != radswap)
7556dbaf22cSJohannes Weiner 		return -ENOENT;
7567a5d0fbbSHugh Dickins 	free_swap_and_cache(radix_to_swp_entry(radswap));
7576dbaf22cSJohannes Weiner 	return 0;
7587a5d0fbbSHugh Dickins }
7597a5d0fbbSHugh Dickins 
7607a5d0fbbSHugh Dickins /*
7616a15a370SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
76248131e03SVlastimil Babka  * given offsets are swapped out.
7636a15a370SVlastimil Babka  *
764b93b0163SMatthew Wilcox  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
7656a15a370SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
7666a15a370SVlastimil Babka  */
76748131e03SVlastimil Babka unsigned long shmem_partial_swap_usage(struct address_space *mapping,
76848131e03SVlastimil Babka 						pgoff_t start, pgoff_t end)
7696a15a370SVlastimil Babka {
7707ae3424fSMatthew Wilcox 	XA_STATE(xas, &mapping->i_pages, start);
7716a15a370SVlastimil Babka 	struct page *page;
77248131e03SVlastimil Babka 	unsigned long swapped = 0;
7736a15a370SVlastimil Babka 
7746a15a370SVlastimil Babka 	rcu_read_lock();
7757ae3424fSMatthew Wilcox 	xas_for_each(&xas, page, end - 1) {
7767ae3424fSMatthew Wilcox 		if (xas_retry(&xas, page))
7772cf938aaSMatthew Wilcox 			continue;
7783159f943SMatthew Wilcox 		if (xa_is_value(page))
7796a15a370SVlastimil Babka 			swapped++;
7806a15a370SVlastimil Babka 
7816a15a370SVlastimil Babka 		if (need_resched()) {
7827ae3424fSMatthew Wilcox 			xas_pause(&xas);
7836a15a370SVlastimil Babka 			cond_resched_rcu();
7846a15a370SVlastimil Babka 		}
7856a15a370SVlastimil Babka 	}
7866a15a370SVlastimil Babka 
7876a15a370SVlastimil Babka 	rcu_read_unlock();
7886a15a370SVlastimil Babka 
7896a15a370SVlastimil Babka 	return swapped << PAGE_SHIFT;
7906a15a370SVlastimil Babka }
7916a15a370SVlastimil Babka 
7926a15a370SVlastimil Babka /*
79348131e03SVlastimil Babka  * Determine (in bytes) how many of the shmem object's pages mapped by the
79448131e03SVlastimil Babka  * given vma is swapped out.
79548131e03SVlastimil Babka  *
796b93b0163SMatthew Wilcox  * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
79748131e03SVlastimil Babka  * as long as the inode doesn't go away and racy results are not a problem.
79848131e03SVlastimil Babka  */
79948131e03SVlastimil Babka unsigned long shmem_swap_usage(struct vm_area_struct *vma)
80048131e03SVlastimil Babka {
80148131e03SVlastimil Babka 	struct inode *inode = file_inode(vma->vm_file);
80248131e03SVlastimil Babka 	struct shmem_inode_info *info = SHMEM_I(inode);
80348131e03SVlastimil Babka 	struct address_space *mapping = inode->i_mapping;
80448131e03SVlastimil Babka 	unsigned long swapped;
80548131e03SVlastimil Babka 
80648131e03SVlastimil Babka 	/* Be careful as we don't hold info->lock */
80748131e03SVlastimil Babka 	swapped = READ_ONCE(info->swapped);
80848131e03SVlastimil Babka 
80948131e03SVlastimil Babka 	/*
81048131e03SVlastimil Babka 	 * The easier cases are when the shmem object has nothing in swap, or
81148131e03SVlastimil Babka 	 * the vma maps it whole. Then we can simply use the stats that we
81248131e03SVlastimil Babka 	 * already track.
81348131e03SVlastimil Babka 	 */
81448131e03SVlastimil Babka 	if (!swapped)
81548131e03SVlastimil Babka 		return 0;
81648131e03SVlastimil Babka 
81748131e03SVlastimil Babka 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
81848131e03SVlastimil Babka 		return swapped << PAGE_SHIFT;
81948131e03SVlastimil Babka 
82048131e03SVlastimil Babka 	/* Here comes the more involved part */
82148131e03SVlastimil Babka 	return shmem_partial_swap_usage(mapping,
82248131e03SVlastimil Babka 			linear_page_index(vma, vma->vm_start),
82348131e03SVlastimil Babka 			linear_page_index(vma, vma->vm_end));
82448131e03SVlastimil Babka }
82548131e03SVlastimil Babka 
82648131e03SVlastimil Babka /*
82724513264SHugh Dickins  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
82824513264SHugh Dickins  */
82924513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
83024513264SHugh Dickins {
83124513264SHugh Dickins 	struct pagevec pvec;
83224513264SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
83324513264SHugh Dickins 	pgoff_t index = 0;
83424513264SHugh Dickins 
83586679820SMel Gorman 	pagevec_init(&pvec);
83624513264SHugh Dickins 	/*
83724513264SHugh Dickins 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
83824513264SHugh Dickins 	 */
83924513264SHugh Dickins 	while (!mapping_unevictable(mapping)) {
84024513264SHugh Dickins 		/*
84124513264SHugh Dickins 		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
84224513264SHugh Dickins 		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
84324513264SHugh Dickins 		 */
8440cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
84524513264SHugh Dickins 					   PAGEVEC_SIZE, pvec.pages, indices);
84624513264SHugh Dickins 		if (!pvec.nr)
84724513264SHugh Dickins 			break;
84824513264SHugh Dickins 		index = indices[pvec.nr - 1] + 1;
8490cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
85064e3d12fSKuo-Hsin Yang 		check_move_unevictable_pages(&pvec);
85124513264SHugh Dickins 		pagevec_release(&pvec);
85224513264SHugh Dickins 		cond_resched();
85324513264SHugh Dickins 	}
8547a5d0fbbSHugh Dickins }
8557a5d0fbbSHugh Dickins 
8567a5d0fbbSHugh Dickins /*
85771725ed1SHugh Dickins  * Check whether a hole-punch or truncation needs to split a huge page,
85871725ed1SHugh Dickins  * returning true if no split was required, or the split has been successful.
85971725ed1SHugh Dickins  *
86071725ed1SHugh Dickins  * Eviction (or truncation to 0 size) should never need to split a huge page;
86171725ed1SHugh Dickins  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
86271725ed1SHugh Dickins  * head, and then succeeded to trylock on tail.
86371725ed1SHugh Dickins  *
86471725ed1SHugh Dickins  * A split can only succeed when there are no additional references on the
86571725ed1SHugh Dickins  * huge page: so the split below relies upon find_get_entries() having stopped
86671725ed1SHugh Dickins  * when it found a subpage of the huge page, without getting further references.
86771725ed1SHugh Dickins  */
86871725ed1SHugh Dickins static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
86971725ed1SHugh Dickins {
87071725ed1SHugh Dickins 	if (!PageTransCompound(page))
87171725ed1SHugh Dickins 		return true;
87271725ed1SHugh Dickins 
87371725ed1SHugh Dickins 	/* Just proceed to delete a huge page wholly within the range punched */
87471725ed1SHugh Dickins 	if (PageHead(page) &&
87571725ed1SHugh Dickins 	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
87671725ed1SHugh Dickins 		return true;
87771725ed1SHugh Dickins 
87871725ed1SHugh Dickins 	/* Try to split huge page, so we can truly punch the hole or truncate */
87971725ed1SHugh Dickins 	return split_huge_page(page) >= 0;
88071725ed1SHugh Dickins }
88171725ed1SHugh Dickins 
88271725ed1SHugh Dickins /*
8837f4446eeSMatthew Wilcox  * Remove range of pages and swap entries from page cache, and free them.
8841635f6a7SHugh Dickins  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
8857a5d0fbbSHugh Dickins  */
8861635f6a7SHugh Dickins static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
8871635f6a7SHugh Dickins 								 bool unfalloc)
8881da177e4SLinus Torvalds {
889285b2c4fSHugh Dickins 	struct address_space *mapping = inode->i_mapping;
8901da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
89109cbfeafSKirill A. Shutemov 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
89209cbfeafSKirill A. Shutemov 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
89309cbfeafSKirill A. Shutemov 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
89409cbfeafSKirill A. Shutemov 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
895bda97eabSHugh Dickins 	struct pagevec pvec;
8967a5d0fbbSHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
8977a5d0fbbSHugh Dickins 	long nr_swaps_freed = 0;
898285b2c4fSHugh Dickins 	pgoff_t index;
899bda97eabSHugh Dickins 	int i;
9001da177e4SLinus Torvalds 
90183e4fa9cSHugh Dickins 	if (lend == -1)
90283e4fa9cSHugh Dickins 		end = -1;	/* unsigned, so actually very big */
903bda97eabSHugh Dickins 
90486679820SMel Gorman 	pagevec_init(&pvec);
905bda97eabSHugh Dickins 	index = start;
90683e4fa9cSHugh Dickins 	while (index < end) {
9070cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
90883e4fa9cSHugh Dickins 			min(end - index, (pgoff_t)PAGEVEC_SIZE),
9097a5d0fbbSHugh Dickins 			pvec.pages, indices);
9107a5d0fbbSHugh Dickins 		if (!pvec.nr)
9117a5d0fbbSHugh Dickins 			break;
912bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
913bda97eabSHugh Dickins 			struct page *page = pvec.pages[i];
914bda97eabSHugh Dickins 
9157a5d0fbbSHugh Dickins 			index = indices[i];
91683e4fa9cSHugh Dickins 			if (index >= end)
917bda97eabSHugh Dickins 				break;
918bda97eabSHugh Dickins 
9193159f943SMatthew Wilcox 			if (xa_is_value(page)) {
9201635f6a7SHugh Dickins 				if (unfalloc)
9211635f6a7SHugh Dickins 					continue;
9227a5d0fbbSHugh Dickins 				nr_swaps_freed += !shmem_free_swap(mapping,
9237a5d0fbbSHugh Dickins 								index, page);
9247a5d0fbbSHugh Dickins 				continue;
9257a5d0fbbSHugh Dickins 			}
9267a5d0fbbSHugh Dickins 
927800d8c63SKirill A. Shutemov 			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
928800d8c63SKirill A. Shutemov 
929bda97eabSHugh Dickins 			if (!trylock_page(page))
930bda97eabSHugh Dickins 				continue;
931800d8c63SKirill A. Shutemov 
93271725ed1SHugh Dickins 			if ((!unfalloc || !PageUptodate(page)) &&
93371725ed1SHugh Dickins 			    page_mapping(page) == mapping) {
934309381feSSasha Levin 				VM_BUG_ON_PAGE(PageWriteback(page), page);
93571725ed1SHugh Dickins 				if (shmem_punch_compound(page, start, end))
936bda97eabSHugh Dickins 					truncate_inode_page(mapping, page);
9377a5d0fbbSHugh Dickins 			}
938bda97eabSHugh Dickins 			unlock_page(page);
939bda97eabSHugh Dickins 		}
9400cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
94124513264SHugh Dickins 		pagevec_release(&pvec);
942bda97eabSHugh Dickins 		cond_resched();
943bda97eabSHugh Dickins 		index++;
944bda97eabSHugh Dickins 	}
945bda97eabSHugh Dickins 
94683e4fa9cSHugh Dickins 	if (partial_start) {
947bda97eabSHugh Dickins 		struct page *page = NULL;
9489e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, start - 1, &page, SGP_READ);
949bda97eabSHugh Dickins 		if (page) {
95009cbfeafSKirill A. Shutemov 			unsigned int top = PAGE_SIZE;
95183e4fa9cSHugh Dickins 			if (start > end) {
95283e4fa9cSHugh Dickins 				top = partial_end;
95383e4fa9cSHugh Dickins 				partial_end = 0;
95483e4fa9cSHugh Dickins 			}
95583e4fa9cSHugh Dickins 			zero_user_segment(page, partial_start, top);
956bda97eabSHugh Dickins 			set_page_dirty(page);
957bda97eabSHugh Dickins 			unlock_page(page);
95809cbfeafSKirill A. Shutemov 			put_page(page);
959bda97eabSHugh Dickins 		}
960bda97eabSHugh Dickins 	}
96183e4fa9cSHugh Dickins 	if (partial_end) {
96283e4fa9cSHugh Dickins 		struct page *page = NULL;
9639e18eb29SAndres Lagar-Cavilla 		shmem_getpage(inode, end, &page, SGP_READ);
96483e4fa9cSHugh Dickins 		if (page) {
96583e4fa9cSHugh Dickins 			zero_user_segment(page, 0, partial_end);
96683e4fa9cSHugh Dickins 			set_page_dirty(page);
96783e4fa9cSHugh Dickins 			unlock_page(page);
96809cbfeafSKirill A. Shutemov 			put_page(page);
96983e4fa9cSHugh Dickins 		}
97083e4fa9cSHugh Dickins 	}
97183e4fa9cSHugh Dickins 	if (start >= end)
97283e4fa9cSHugh Dickins 		return;
973bda97eabSHugh Dickins 
974bda97eabSHugh Dickins 	index = start;
975b1a36650SHugh Dickins 	while (index < end) {
976bda97eabSHugh Dickins 		cond_resched();
9770cd6144aSJohannes Weiner 
9780cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
97983e4fa9cSHugh Dickins 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
9807a5d0fbbSHugh Dickins 				pvec.pages, indices);
9817a5d0fbbSHugh Dickins 		if (!pvec.nr) {
982b1a36650SHugh Dickins 			/* If all gone or hole-punch or unfalloc, we're done */
983b1a36650SHugh Dickins 			if (index == start || end != -1)
984bda97eabSHugh Dickins 				break;
985b1a36650SHugh Dickins 			/* But if truncating, restart to make sure all gone */
986bda97eabSHugh Dickins 			index = start;
987bda97eabSHugh Dickins 			continue;
988bda97eabSHugh Dickins 		}
989bda97eabSHugh Dickins 		for (i = 0; i < pagevec_count(&pvec); i++) {
990bda97eabSHugh Dickins 			struct page *page = pvec.pages[i];
991bda97eabSHugh Dickins 
9927a5d0fbbSHugh Dickins 			index = indices[i];
99383e4fa9cSHugh Dickins 			if (index >= end)
994bda97eabSHugh Dickins 				break;
995bda97eabSHugh Dickins 
9963159f943SMatthew Wilcox 			if (xa_is_value(page)) {
9971635f6a7SHugh Dickins 				if (unfalloc)
9981635f6a7SHugh Dickins 					continue;
999b1a36650SHugh Dickins 				if (shmem_free_swap(mapping, index, page)) {
1000b1a36650SHugh Dickins 					/* Swap was replaced by page: retry */
1001b1a36650SHugh Dickins 					index--;
1002b1a36650SHugh Dickins 					break;
1003b1a36650SHugh Dickins 				}
1004b1a36650SHugh Dickins 				nr_swaps_freed++;
10057a5d0fbbSHugh Dickins 				continue;
10067a5d0fbbSHugh Dickins 			}
10077a5d0fbbSHugh Dickins 
1008bda97eabSHugh Dickins 			lock_page(page);
1009800d8c63SKirill A. Shutemov 
10101635f6a7SHugh Dickins 			if (!unfalloc || !PageUptodate(page)) {
101171725ed1SHugh Dickins 				if (page_mapping(page) != mapping) {
1012b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
1013b1a36650SHugh Dickins 					unlock_page(page);
1014b1a36650SHugh Dickins 					index--;
1015b1a36650SHugh Dickins 					break;
10167a5d0fbbSHugh Dickins 				}
101771725ed1SHugh Dickins 				VM_BUG_ON_PAGE(PageWriteback(page), page);
101871725ed1SHugh Dickins 				if (shmem_punch_compound(page, start, end))
101971725ed1SHugh Dickins 					truncate_inode_page(mapping, page);
10200783ac95SHugh Dickins 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
102171725ed1SHugh Dickins 					/* Wipe the page and don't get stuck */
102271725ed1SHugh Dickins 					clear_highpage(page);
102371725ed1SHugh Dickins 					flush_dcache_page(page);
102471725ed1SHugh Dickins 					set_page_dirty(page);
102571725ed1SHugh Dickins 					if (index <
102671725ed1SHugh Dickins 					    round_up(start, HPAGE_PMD_NR))
102771725ed1SHugh Dickins 						start = index + 1;
102871725ed1SHugh Dickins 				}
10291635f6a7SHugh Dickins 			}
1030bda97eabSHugh Dickins 			unlock_page(page);
1031bda97eabSHugh Dickins 		}
10320cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
103324513264SHugh Dickins 		pagevec_release(&pvec);
1034bda97eabSHugh Dickins 		index++;
1035bda97eabSHugh Dickins 	}
103694c1e62dSHugh Dickins 
10374595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
10387a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
10391da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
10404595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
10411635f6a7SHugh Dickins }
10421da177e4SLinus Torvalds 
10431635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
10441635f6a7SHugh Dickins {
10451635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
1046078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
10471da177e4SLinus Torvalds }
104894c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
10491da177e4SLinus Torvalds 
1050a528d35eSDavid Howells static int shmem_getattr(const struct path *path, struct kstat *stat,
1051a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
105244a30220SYu Zhao {
1053a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
105444a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
105589fdcd26SYang Shi 	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
105644a30220SYu Zhao 
1057d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
10584595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
105944a30220SYu Zhao 		shmem_recalc_inode(inode);
10604595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1061d0424c42SHugh Dickins 	}
106244a30220SYu Zhao 	generic_fillattr(inode, stat);
106389fdcd26SYang Shi 
106489fdcd26SYang Shi 	if (is_huge_enabled(sb_info))
106589fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
106689fdcd26SYang Shi 
106744a30220SYu Zhao 	return 0;
106844a30220SYu Zhao }
106944a30220SYu Zhao 
107094c1e62dSHugh Dickins static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
10711da177e4SLinus Torvalds {
107275c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
107340e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1074779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10751da177e4SLinus Torvalds 	int error;
10761da177e4SLinus Torvalds 
107731051c85SJan Kara 	error = setattr_prepare(dentry, attr);
1078db78b877SChristoph Hellwig 	if (error)
1079db78b877SChristoph Hellwig 		return error;
1080db78b877SChristoph Hellwig 
108194c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
108294c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
108394c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10843889e6e7Snpiggin@suse.de 
108540e041a2SDavid Herrmann 		/* protected by i_mutex */
108640e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
108740e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
108840e041a2SDavid Herrmann 			return -EPERM;
108940e041a2SDavid Herrmann 
109094c1e62dSHugh Dickins 		if (newsize != oldsize) {
109177142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
109277142517SKonstantin Khlebnikov 					oldsize, newsize);
109377142517SKonstantin Khlebnikov 			if (error)
109477142517SKonstantin Khlebnikov 				return error;
109594c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1096078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
109794c1e62dSHugh Dickins 		}
1098afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
109994c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1100d0424c42SHugh Dickins 			if (oldsize > holebegin)
1101d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1102d0424c42SHugh Dickins 							holebegin, 0, 1);
1103d0424c42SHugh Dickins 			if (info->alloced)
1104d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1105d0424c42SHugh Dickins 							newsize, (loff_t)-1);
110694c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1107d0424c42SHugh Dickins 			if (oldsize > holebegin)
1108d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1109d0424c42SHugh Dickins 							holebegin, 0, 1);
1110779750d2SKirill A. Shutemov 
1111779750d2SKirill A. Shutemov 			/*
1112779750d2SKirill A. Shutemov 			 * Part of the huge page can be beyond i_size: subject
1113779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1114779750d2SKirill A. Shutemov 			 */
1115396bcc52SMatthew Wilcox (Oracle) 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1116779750d2SKirill A. Shutemov 				spin_lock(&sbinfo->shrinklist_lock);
1117d041353dSCong Wang 				/*
1118d041353dSCong Wang 				 * _careful to defend against unlocked access to
1119d041353dSCong Wang 				 * ->shrink_list in shmem_unused_huge_shrink()
1120d041353dSCong Wang 				 */
1121d041353dSCong Wang 				if (list_empty_careful(&info->shrinklist)) {
1122779750d2SKirill A. Shutemov 					list_add_tail(&info->shrinklist,
1123779750d2SKirill A. Shutemov 							&sbinfo->shrinklist);
1124779750d2SKirill A. Shutemov 					sbinfo->shrinklist_len++;
1125779750d2SKirill A. Shutemov 				}
1126779750d2SKirill A. Shutemov 				spin_unlock(&sbinfo->shrinklist_lock);
1127779750d2SKirill A. Shutemov 			}
112894c1e62dSHugh Dickins 		}
11291da177e4SLinus Torvalds 	}
11301da177e4SLinus Torvalds 
11316a1a90adSChristoph Hellwig 	setattr_copy(inode, attr);
1132db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1133feda821eSChristoph Hellwig 		error = posix_acl_chmod(inode, inode->i_mode);
11341da177e4SLinus Torvalds 	return error;
11351da177e4SLinus Torvalds }
11361da177e4SLinus Torvalds 
11371f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
11381da177e4SLinus Torvalds {
11391da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1140779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
11411da177e4SLinus Torvalds 
11423889e6e7Snpiggin@suse.de 	if (inode->i_mapping->a_ops == &shmem_aops) {
11431da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
11441da177e4SLinus Torvalds 		inode->i_size = 0;
11453889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1146779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1147779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1148779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1149779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1150779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1151779750d2SKirill A. Shutemov 			}
1152779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1153779750d2SKirill A. Shutemov 		}
1154af53d3e9SHugh Dickins 		while (!list_empty(&info->swaplist)) {
1155af53d3e9SHugh Dickins 			/* Wait while shmem_unuse() is scanning this inode... */
1156af53d3e9SHugh Dickins 			wait_var_event(&info->stop_eviction,
1157af53d3e9SHugh Dickins 				       !atomic_read(&info->stop_eviction));
1158cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
1159af53d3e9SHugh Dickins 			/* ...but beware of the race if we peeked too early */
1160af53d3e9SHugh Dickins 			if (!atomic_read(&info->stop_eviction))
11611da177e4SLinus Torvalds 				list_del_init(&info->swaplist);
1162cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
11631da177e4SLinus Torvalds 		}
11643ed47db3SAl Viro 	}
1165b09e0fa4SEric Paris 
116638f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11670f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11685b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1169dbd5768fSJan Kara 	clear_inode(inode);
11701da177e4SLinus Torvalds }
11711da177e4SLinus Torvalds 
1172b56a2d8aSVineeth Remanan Pillai extern struct swap_info_struct *swap_info[];
1173b56a2d8aSVineeth Remanan Pillai 
1174b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1175b56a2d8aSVineeth Remanan Pillai 				   pgoff_t start, unsigned int nr_entries,
1176b56a2d8aSVineeth Remanan Pillai 				   struct page **entries, pgoff_t *indices,
117787039546SHugh Dickins 				   unsigned int type, bool frontswap)
1178478922e2SMatthew Wilcox {
1179b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1180b56a2d8aSVineeth Remanan Pillai 	struct page *page;
118187039546SHugh Dickins 	swp_entry_t entry;
1182b56a2d8aSVineeth Remanan Pillai 	unsigned int ret = 0;
1183b56a2d8aSVineeth Remanan Pillai 
1184b56a2d8aSVineeth Remanan Pillai 	if (!nr_entries)
1185b56a2d8aSVineeth Remanan Pillai 		return 0;
1186478922e2SMatthew Wilcox 
1187478922e2SMatthew Wilcox 	rcu_read_lock();
1188b56a2d8aSVineeth Remanan Pillai 	xas_for_each(&xas, page, ULONG_MAX) {
1189b56a2d8aSVineeth Remanan Pillai 		if (xas_retry(&xas, page))
11905b9c98f3SMike Kravetz 			continue;
1191b56a2d8aSVineeth Remanan Pillai 
1192b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1193478922e2SMatthew Wilcox 			continue;
1194b56a2d8aSVineeth Remanan Pillai 
119587039546SHugh Dickins 		entry = radix_to_swp_entry(page);
119687039546SHugh Dickins 		if (swp_type(entry) != type)
1197b56a2d8aSVineeth Remanan Pillai 			continue;
119887039546SHugh Dickins 		if (frontswap &&
119987039546SHugh Dickins 		    !frontswap_test(swap_info[type], swp_offset(entry)))
120087039546SHugh Dickins 			continue;
1201b56a2d8aSVineeth Remanan Pillai 
1202b56a2d8aSVineeth Remanan Pillai 		indices[ret] = xas.xa_index;
1203b56a2d8aSVineeth Remanan Pillai 		entries[ret] = page;
1204b56a2d8aSVineeth Remanan Pillai 
1205b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1206e21a2955SMatthew Wilcox 			xas_pause(&xas);
1207478922e2SMatthew Wilcox 			cond_resched_rcu();
1208478922e2SMatthew Wilcox 		}
1209b56a2d8aSVineeth Remanan Pillai 		if (++ret == nr_entries)
1210b56a2d8aSVineeth Remanan Pillai 			break;
1211b56a2d8aSVineeth Remanan Pillai 	}
1212478922e2SMatthew Wilcox 	rcu_read_unlock();
1213e21a2955SMatthew Wilcox 
1214b56a2d8aSVineeth Remanan Pillai 	return ret;
1215b56a2d8aSVineeth Remanan Pillai }
1216b56a2d8aSVineeth Remanan Pillai 
1217b56a2d8aSVineeth Remanan Pillai /*
1218b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1219b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error in case of failure.
1220b56a2d8aSVineeth Remanan Pillai  */
1221b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1222b56a2d8aSVineeth Remanan Pillai 				    pgoff_t *indices)
1223b56a2d8aSVineeth Remanan Pillai {
1224b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1225b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1226b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1227b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1228b56a2d8aSVineeth Remanan Pillai 
1229b56a2d8aSVineeth Remanan Pillai 	for (i = 0; i < pvec.nr; i++) {
1230b56a2d8aSVineeth Remanan Pillai 		struct page *page = pvec.pages[i];
1231b56a2d8aSVineeth Remanan Pillai 
1232b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1233b56a2d8aSVineeth Remanan Pillai 			continue;
1234b56a2d8aSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, indices[i],
1235b56a2d8aSVineeth Remanan Pillai 					  &page, SGP_CACHE,
1236b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1237b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1238b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1239b56a2d8aSVineeth Remanan Pillai 			unlock_page(page);
1240b56a2d8aSVineeth Remanan Pillai 			put_page(page);
1241b56a2d8aSVineeth Remanan Pillai 			ret++;
1242b56a2d8aSVineeth Remanan Pillai 		}
1243b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1244b56a2d8aSVineeth Remanan Pillai 			break;
1245b56a2d8aSVineeth Remanan Pillai 		error = 0;
1246b56a2d8aSVineeth Remanan Pillai 	}
1247b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1248478922e2SMatthew Wilcox }
1249478922e2SMatthew Wilcox 
125046f65ec1SHugh Dickins /*
125146f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
125246f65ec1SHugh Dickins  */
1253b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1254b56a2d8aSVineeth Remanan Pillai 			     bool frontswap, unsigned long *fs_pages_to_unuse)
12551da177e4SLinus Torvalds {
1256b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1257b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1258b56a2d8aSVineeth Remanan Pillai 	struct pagevec pvec;
1259b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1260b56a2d8aSVineeth Remanan Pillai 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1261b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
12621da177e4SLinus Torvalds 
1263b56a2d8aSVineeth Remanan Pillai 	pagevec_init(&pvec);
1264b56a2d8aSVineeth Remanan Pillai 	do {
1265b56a2d8aSVineeth Remanan Pillai 		unsigned int nr_entries = PAGEVEC_SIZE;
12662e0e26c7SHugh Dickins 
1267b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1268b56a2d8aSVineeth Remanan Pillai 			nr_entries = *fs_pages_to_unuse;
12692e0e26c7SHugh Dickins 
1270b56a2d8aSVineeth Remanan Pillai 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1271b56a2d8aSVineeth Remanan Pillai 						  pvec.pages, indices,
127287039546SHugh Dickins 						  type, frontswap);
1273b56a2d8aSVineeth Remanan Pillai 		if (pvec.nr == 0) {
1274b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1275778dd893SHugh Dickins 			break;
1276b56a2d8aSVineeth Remanan Pillai 		}
1277b56a2d8aSVineeth Remanan Pillai 
1278b56a2d8aSVineeth Remanan Pillai 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1279b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1280b56a2d8aSVineeth Remanan Pillai 			break;
1281b56a2d8aSVineeth Remanan Pillai 
1282b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial) {
1283b56a2d8aSVineeth Remanan Pillai 			*fs_pages_to_unuse -= ret;
1284b56a2d8aSVineeth Remanan Pillai 			if (*fs_pages_to_unuse == 0) {
1285b56a2d8aSVineeth Remanan Pillai 				ret = FRONTSWAP_PAGES_UNUSED;
1286b56a2d8aSVineeth Remanan Pillai 				break;
1287b56a2d8aSVineeth Remanan Pillai 			}
1288b56a2d8aSVineeth Remanan Pillai 		}
1289b56a2d8aSVineeth Remanan Pillai 
1290b56a2d8aSVineeth Remanan Pillai 		start = indices[pvec.nr - 1];
1291b56a2d8aSVineeth Remanan Pillai 	} while (true);
1292b56a2d8aSVineeth Remanan Pillai 
1293b56a2d8aSVineeth Remanan Pillai 	return ret;
1294b56a2d8aSVineeth Remanan Pillai }
1295b56a2d8aSVineeth Remanan Pillai 
1296b56a2d8aSVineeth Remanan Pillai /*
1297b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1298b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1299b56a2d8aSVineeth Remanan Pillai  * unused.
1300b56a2d8aSVineeth Remanan Pillai  */
1301b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
1302b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
1303b56a2d8aSVineeth Remanan Pillai {
1304b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1305b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1306b56a2d8aSVineeth Remanan Pillai 
1307b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1308b56a2d8aSVineeth Remanan Pillai 		return 0;
1309b56a2d8aSVineeth Remanan Pillai 
1310b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1311b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1312b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1313b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1314b56a2d8aSVineeth Remanan Pillai 			continue;
1315b56a2d8aSVineeth Remanan Pillai 		}
1316af53d3e9SHugh Dickins 		/*
1317af53d3e9SHugh Dickins 		 * Drop the swaplist mutex while searching the inode for swap;
1318af53d3e9SHugh Dickins 		 * but before doing so, make sure shmem_evict_inode() will not
1319af53d3e9SHugh Dickins 		 * remove placeholder inode from swaplist, nor let it be freed
1320af53d3e9SHugh Dickins 		 * (igrab() would protect from unlink, but not from unmount).
1321af53d3e9SHugh Dickins 		 */
1322af53d3e9SHugh Dickins 		atomic_inc(&info->stop_eviction);
1323b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1324b56a2d8aSVineeth Remanan Pillai 
1325af53d3e9SHugh Dickins 		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1326b56a2d8aSVineeth Remanan Pillai 					  fs_pages_to_unuse);
1327b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1328b56a2d8aSVineeth Remanan Pillai 
1329b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
1330b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1331b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1332b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1333af53d3e9SHugh Dickins 		if (atomic_dec_and_test(&info->stop_eviction))
1334af53d3e9SHugh Dickins 			wake_up_var(&info->stop_eviction);
1335b56a2d8aSVineeth Remanan Pillai 		if (error)
1336b56a2d8aSVineeth Remanan Pillai 			break;
13371da177e4SLinus Torvalds 	}
1338cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1339778dd893SHugh Dickins 
1340778dd893SHugh Dickins 	return error;
13411da177e4SLinus Torvalds }
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds /*
13441da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
13451da177e4SLinus Torvalds  */
13461da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
13471da177e4SLinus Torvalds {
13481da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13491da177e4SLinus Torvalds 	struct address_space *mapping;
13501da177e4SLinus Torvalds 	struct inode *inode;
13516922c0c7SHugh Dickins 	swp_entry_t swap;
13526922c0c7SHugh Dickins 	pgoff_t index;
13531da177e4SLinus Torvalds 
1354800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
13551da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13561da177e4SLinus Torvalds 	mapping = page->mapping;
13571da177e4SLinus Torvalds 	index = page->index;
13581da177e4SLinus Torvalds 	inode = mapping->host;
13591da177e4SLinus Torvalds 	info = SHMEM_I(inode);
13601da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
13611da177e4SLinus Torvalds 		goto redirty;
1362d9fe526aSHugh Dickins 	if (!total_swap_pages)
13631da177e4SLinus Torvalds 		goto redirty;
13641da177e4SLinus Torvalds 
1365d9fe526aSHugh Dickins 	/*
136697b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
136797b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
136897b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
136997b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
137097b713baSChristoph Hellwig 	 * threads or sync.
1371d9fe526aSHugh Dickins 	 */
137248f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
137348f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
137448f170fbSHugh Dickins 		goto redirty;
137548f170fbSHugh Dickins 	}
13761635f6a7SHugh Dickins 
13771635f6a7SHugh Dickins 	/*
13781635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13791635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13801635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13811aac1400SHugh Dickins 	 *
13821aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13831aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13841aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13851aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13861aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13871635f6a7SHugh Dickins 	 */
13881635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13891aac1400SHugh Dickins 		if (inode->i_private) {
13901aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13911aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13921aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13931aac1400SHugh Dickins 			if (shmem_falloc &&
13948e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13951aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13961aac1400SHugh Dickins 			    index < shmem_falloc->next)
13971aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13981aac1400SHugh Dickins 			else
13991aac1400SHugh Dickins 				shmem_falloc = NULL;
14001aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
14011aac1400SHugh Dickins 			if (shmem_falloc)
14021aac1400SHugh Dickins 				goto redirty;
14031aac1400SHugh Dickins 		}
14041635f6a7SHugh Dickins 		clear_highpage(page);
14051635f6a7SHugh Dickins 		flush_dcache_page(page);
14061635f6a7SHugh Dickins 		SetPageUptodate(page);
14071635f6a7SHugh Dickins 	}
14081635f6a7SHugh Dickins 
140938d8b4e6SHuang Ying 	swap = get_swap_page(page);
141048f170fbSHugh Dickins 	if (!swap.val)
141148f170fbSHugh Dickins 		goto redirty;
1412d9fe526aSHugh Dickins 
1413b1dea800SHugh Dickins 	/*
1414b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
14156922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
14166922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1417b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
14186922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
14196922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1420b1dea800SHugh Dickins 	 */
1421b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
142205bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1423b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1424b1dea800SHugh Dickins 
14254afab1cdSYang Shi 	if (add_to_swap_cache(page, swap,
14264afab1cdSYang Shi 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) {
14274595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1428267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1429267a4c76SHugh Dickins 		info->swapped++;
14304595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1431267a4c76SHugh Dickins 
1432aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
14336922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
14346922c0c7SHugh Dickins 
14356922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1436d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
14379fab5619SHugh Dickins 		swap_writepage(page, wbc);
14381da177e4SLinus Torvalds 		return 0;
14391da177e4SLinus Torvalds 	}
14401da177e4SLinus Torvalds 
14416922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
144275f6d6d2SMinchan Kim 	put_swap_page(page, swap);
14431da177e4SLinus Torvalds redirty:
14441da177e4SLinus Torvalds 	set_page_dirty(page);
1445d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1446d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1447d9fe526aSHugh Dickins 	unlock_page(page);
1448d9fe526aSHugh Dickins 	return 0;
14491da177e4SLinus Torvalds }
14501da177e4SLinus Torvalds 
145175edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
145271fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1453680d794bSakpm@linux-foundation.org {
1454680d794bSakpm@linux-foundation.org 	char buffer[64];
1455680d794bSakpm@linux-foundation.org 
145671fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1457095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1458095f1fc4SLee Schermerhorn 
1459a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1460095f1fc4SLee Schermerhorn 
1461095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1462680d794bSakpm@linux-foundation.org }
146371fe804bSLee Schermerhorn 
146471fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
146571fe804bSLee Schermerhorn {
146671fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
146771fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
146871fe804bSLee Schermerhorn 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
146971fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
147071fe804bSLee Schermerhorn 		mpol_get(mpol);
147171fe804bSLee Schermerhorn 		spin_unlock(&sbinfo->stat_lock);
147271fe804bSLee Schermerhorn 	}
147371fe804bSLee Schermerhorn 	return mpol;
147471fe804bSLee Schermerhorn }
147575edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
147675edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
147775edd345SHugh Dickins {
147875edd345SHugh Dickins }
147975edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
148075edd345SHugh Dickins {
148175edd345SHugh Dickins 	return NULL;
148275edd345SHugh Dickins }
148375edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
148475edd345SHugh Dickins #ifndef CONFIG_NUMA
148575edd345SHugh Dickins #define vm_policy vm_private_data
148675edd345SHugh Dickins #endif
1487680d794bSakpm@linux-foundation.org 
1488800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1489800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1490800d8c63SKirill A. Shutemov {
1491800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14922c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1493800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1494800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1495800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1496800d8c63SKirill A. Shutemov }
1497800d8c63SKirill A. Shutemov 
1498800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1499800d8c63SKirill A. Shutemov {
1500800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1501800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1502800d8c63SKirill A. Shutemov }
1503800d8c63SKirill A. Shutemov 
150441ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
150541ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
15061da177e4SLinus Torvalds {
15071da177e4SLinus Torvalds 	struct vm_area_struct pvma;
150818a2f371SMel Gorman 	struct page *page;
1509e9e9b7ecSMinchan Kim 	struct vm_fault vmf;
15101da177e4SLinus Torvalds 
1511800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1512e9e9b7ecSMinchan Kim 	vmf.vma = &pvma;
1513e9e9b7ecSMinchan Kim 	vmf.address = 0;
1514e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1515800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
151618a2f371SMel Gorman 
1517800d8c63SKirill A. Shutemov 	return page;
1518800d8c63SKirill A. Shutemov }
151918a2f371SMel Gorman 
1520800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1521800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1522800d8c63SKirill A. Shutemov {
1523800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
15247b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
15257b8d046fSMatthew Wilcox 	pgoff_t hindex;
1526800d8c63SKirill A. Shutemov 	struct page *page;
1527800d8c63SKirill A. Shutemov 
15284620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
15297b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
15307b8d046fSMatthew Wilcox 								XA_PRESENT))
1531800d8c63SKirill A. Shutemov 		return NULL;
1532800d8c63SKirill A. Shutemov 
1533800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1534800d8c63SKirill A. Shutemov 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
153519deb769SDavid Rientjes 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1536800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1537800d8c63SKirill A. Shutemov 	if (page)
1538800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
1539dcdf11eeSDavid Rientjes 	else
1540dcdf11eeSDavid Rientjes 		count_vm_event(THP_FILE_FALLBACK);
154118a2f371SMel Gorman 	return page;
154218a2f371SMel Gorman }
154318a2f371SMel Gorman 
154418a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
154518a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
154618a2f371SMel Gorman {
154718a2f371SMel Gorman 	struct vm_area_struct pvma;
154818a2f371SMel Gorman 	struct page *page;
154918a2f371SMel Gorman 
1550800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1551800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1552800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
155318a2f371SMel Gorman 
1554800d8c63SKirill A. Shutemov 	return page;
1555800d8c63SKirill A. Shutemov }
1556800d8c63SKirill A. Shutemov 
1557800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
15580f079694SMike Rapoport 		struct inode *inode,
1559800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1560800d8c63SKirill A. Shutemov {
15610f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1562800d8c63SKirill A. Shutemov 	struct page *page;
1563800d8c63SKirill A. Shutemov 	int nr;
1564800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1565800d8c63SKirill A. Shutemov 
1566396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1567800d8c63SKirill A. Shutemov 		huge = false;
1568800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1569800d8c63SKirill A. Shutemov 
15700f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1571800d8c63SKirill A. Shutemov 		goto failed;
1572800d8c63SKirill A. Shutemov 
1573800d8c63SKirill A. Shutemov 	if (huge)
1574800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1575800d8c63SKirill A. Shutemov 	else
1576800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
157775edd345SHugh Dickins 	if (page) {
157875edd345SHugh Dickins 		__SetPageLocked(page);
157975edd345SHugh Dickins 		__SetPageSwapBacked(page);
1580800d8c63SKirill A. Shutemov 		return page;
158175edd345SHugh Dickins 	}
158218a2f371SMel Gorman 
1583800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15840f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1585800d8c63SKirill A. Shutemov failed:
1586800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15871da177e4SLinus Torvalds }
158871fe804bSLee Schermerhorn 
15891da177e4SLinus Torvalds /*
1590bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1591bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1592bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1593bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1594bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1595bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1596bde05d1cSHugh Dickins  *
1597bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1598bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1599bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1600bde05d1cSHugh Dickins  */
1601bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1602bde05d1cSHugh Dickins {
1603bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1604bde05d1cSHugh Dickins }
1605bde05d1cSHugh Dickins 
1606bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1607bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1608bde05d1cSHugh Dickins {
1609bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1610bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1611c1cb20d4SYu Zhao 	swp_entry_t entry;
1612bde05d1cSHugh Dickins 	pgoff_t swap_index;
1613bde05d1cSHugh Dickins 	int error;
1614bde05d1cSHugh Dickins 
1615bde05d1cSHugh Dickins 	oldpage = *pagep;
1616c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1617c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1618bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1619bde05d1cSHugh Dickins 
1620bde05d1cSHugh Dickins 	/*
1621bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1622bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1623bde05d1cSHugh Dickins 	 */
1624bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1625bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1626bde05d1cSHugh Dickins 	if (!newpage)
1627bde05d1cSHugh Dickins 		return -ENOMEM;
1628bde05d1cSHugh Dickins 
162909cbfeafSKirill A. Shutemov 	get_page(newpage);
1630bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
16310142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1632bde05d1cSHugh Dickins 
16339956edf3SHugh Dickins 	__SetPageLocked(newpage);
16349956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1635bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1636c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1637bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1638bde05d1cSHugh Dickins 
1639bde05d1cSHugh Dickins 	/*
1640bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1641bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1642bde05d1cSHugh Dickins 	 */
1643b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
164462f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
16450142ef6cSHugh Dickins 	if (!error) {
16460d1c2072SJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
16470d1c2072SJohannes Weiner 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
16480d1c2072SJohannes Weiner 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
16490142ef6cSHugh Dickins 	}
1650b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1651bde05d1cSHugh Dickins 
16520142ef6cSHugh Dickins 	if (unlikely(error)) {
16530142ef6cSHugh Dickins 		/*
16540142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
16550142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
16560142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
16570142ef6cSHugh Dickins 		 */
16580142ef6cSHugh Dickins 		oldpage = newpage;
16590142ef6cSHugh Dickins 	} else {
16606058eaecSJohannes Weiner 		lru_cache_add(newpage);
16610142ef6cSHugh Dickins 		*pagep = newpage;
16620142ef6cSHugh Dickins 	}
1663bde05d1cSHugh Dickins 
1664bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1665bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1666bde05d1cSHugh Dickins 
1667bde05d1cSHugh Dickins 	unlock_page(oldpage);
166809cbfeafSKirill A. Shutemov 	put_page(oldpage);
166909cbfeafSKirill A. Shutemov 	put_page(oldpage);
16700142ef6cSHugh Dickins 	return error;
1671bde05d1cSHugh Dickins }
1672bde05d1cSHugh Dickins 
1673bde05d1cSHugh Dickins /*
1674c5bf121eSVineeth Remanan Pillai  * Swap in the page pointed to by *pagep.
1675c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *pagep contains a valid swapped page.
1676c5bf121eSVineeth Remanan Pillai  * Returns 0 and the page in pagep if success. On failure, returns the
1677c5bf121eSVineeth Remanan Pillai  * the error code and NULL in *pagep.
16781da177e4SLinus Torvalds  */
1679c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1680c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
1681c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
16822b740303SSouptick Joarder 			     vm_fault_t *fault_type)
16831da177e4SLinus Torvalds {
16841da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
168523f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1686c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
168727ab7006SHugh Dickins 	struct page *page;
16881da177e4SLinus Torvalds 	swp_entry_t swap;
16891da177e4SLinus Torvalds 	int error;
16901da177e4SLinus Torvalds 
1691c5bf121eSVineeth Remanan Pillai 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1692c5bf121eSVineeth Remanan Pillai 	swap = radix_to_swp_entry(*pagep);
1693c5bf121eSVineeth Remanan Pillai 	*pagep = NULL;
169454af6042SHugh Dickins 
16951da177e4SLinus Torvalds 	/* Look it up and read it in.. */
1696ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
169727ab7006SHugh Dickins 	if (!page) {
16989e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
16999e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
170068da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
17019e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
17022262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
17039e18eb29SAndres Lagar-Cavilla 		}
17049e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the io */
170541ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
170627ab7006SHugh Dickins 		if (!page) {
17071da177e4SLinus Torvalds 			error = -ENOMEM;
170854af6042SHugh Dickins 			goto failed;
1709285b2c4fSHugh Dickins 		}
17101da177e4SLinus Torvalds 	}
17111da177e4SLinus Torvalds 
17121da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
171354af6042SHugh Dickins 	lock_page(page);
17140142ef6cSHugh Dickins 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1715d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1716c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1717d1899228SHugh Dickins 		goto unlock;
1718bde05d1cSHugh Dickins 	}
171927ab7006SHugh Dickins 	if (!PageUptodate(page)) {
17201da177e4SLinus Torvalds 		error = -EIO;
172154af6042SHugh Dickins 		goto failed;
172254af6042SHugh Dickins 	}
172354af6042SHugh Dickins 	wait_on_page_writeback(page);
172454af6042SHugh Dickins 
1725bde05d1cSHugh Dickins 	if (shmem_should_replace_page(page, gfp)) {
1726bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1727bde05d1cSHugh Dickins 		if (error)
172854af6042SHugh Dickins 			goto failed;
17291da177e4SLinus Torvalds 	}
17301da177e4SLinus Torvalds 
17313fea5a49SJohannes Weiner 	error = shmem_add_to_page_cache(page, mapping, index,
17323fea5a49SJohannes Weiner 					swp_to_radix_entry(swap), gfp,
17333fea5a49SJohannes Weiner 					charge_mm);
173454af6042SHugh Dickins 	if (error)
173554af6042SHugh Dickins 		goto failed;
173654af6042SHugh Dickins 
17374595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
173854af6042SHugh Dickins 	info->swapped--;
173954af6042SHugh Dickins 	shmem_recalc_inode(inode);
17404595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
174127ab7006SHugh Dickins 
174266d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
174366d2f4d2SHugh Dickins 		mark_page_accessed(page);
174466d2f4d2SHugh Dickins 
174527ab7006SHugh Dickins 	delete_from_swap_cache(page);
174627ab7006SHugh Dickins 	set_page_dirty(page);
174727ab7006SHugh Dickins 	swap_free(swap);
174827ab7006SHugh Dickins 
1749c5bf121eSVineeth Remanan Pillai 	*pagep = page;
1750c5bf121eSVineeth Remanan Pillai 	return 0;
1751c5bf121eSVineeth Remanan Pillai failed:
1752c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1753c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1754c5bf121eSVineeth Remanan Pillai unlock:
1755c5bf121eSVineeth Remanan Pillai 	if (page) {
1756c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1757c5bf121eSVineeth Remanan Pillai 		put_page(page);
1758c5bf121eSVineeth Remanan Pillai 	}
1759c5bf121eSVineeth Remanan Pillai 
1760c5bf121eSVineeth Remanan Pillai 	return error;
1761c5bf121eSVineeth Remanan Pillai }
1762c5bf121eSVineeth Remanan Pillai 
1763c5bf121eSVineeth Remanan Pillai /*
1764c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1765c5bf121eSVineeth Remanan Pillai  *
1766c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1767c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty since we also free the swap
1768c5bf121eSVineeth Remanan Pillai  * entry since a page cannot live in both the swap and page cache.
1769c5bf121eSVineeth Remanan Pillai  *
177028eb3c80SMiles Chen  * vmf and fault_type are only supplied by shmem_fault:
1771c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1772c5bf121eSVineeth Remanan Pillai  */
1773c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1774c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1775c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1776c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1777c5bf121eSVineeth Remanan Pillai {
1778c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1779c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1780c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1781c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1782c5bf121eSVineeth Remanan Pillai 	struct page *page;
1783c5bf121eSVineeth Remanan Pillai 	enum sgp_type sgp_huge = sgp;
1784c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1785c5bf121eSVineeth Remanan Pillai 	int error;
1786c5bf121eSVineeth Remanan Pillai 	int once = 0;
1787c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1788c5bf121eSVineeth Remanan Pillai 
1789c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1790c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1791c5bf121eSVineeth Remanan Pillai 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1792c5bf121eSVineeth Remanan Pillai 		sgp = SGP_CACHE;
1793c5bf121eSVineeth Remanan Pillai repeat:
1794c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1795c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1796c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1797c5bf121eSVineeth Remanan Pillai 	}
1798c5bf121eSVineeth Remanan Pillai 
1799c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
1800c5bf121eSVineeth Remanan Pillai 	charge_mm = vma ? vma->vm_mm : current->mm;
1801c5bf121eSVineeth Remanan Pillai 
1802c5bf121eSVineeth Remanan Pillai 	page = find_lock_entry(mapping, index);
1803c5bf121eSVineeth Remanan Pillai 	if (xa_is_value(page)) {
1804c5bf121eSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, index, &page,
1805c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1806c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1807c5bf121eSVineeth Remanan Pillai 			goto repeat;
1808c5bf121eSVineeth Remanan Pillai 
1809c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1810c5bf121eSVineeth Remanan Pillai 		return error;
1811c5bf121eSVineeth Remanan Pillai 	}
1812c5bf121eSVineeth Remanan Pillai 
1813c5bf121eSVineeth Remanan Pillai 	if (page && sgp == SGP_WRITE)
1814c5bf121eSVineeth Remanan Pillai 		mark_page_accessed(page);
1815c5bf121eSVineeth Remanan Pillai 
1816c5bf121eSVineeth Remanan Pillai 	/* fallocated page? */
1817c5bf121eSVineeth Remanan Pillai 	if (page && !PageUptodate(page)) {
1818c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1819c5bf121eSVineeth Remanan Pillai 			goto clear;
1820c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1821c5bf121eSVineeth Remanan Pillai 		put_page(page);
1822c5bf121eSVineeth Remanan Pillai 		page = NULL;
1823c5bf121eSVineeth Remanan Pillai 	}
1824c5bf121eSVineeth Remanan Pillai 	if (page || sgp == SGP_READ) {
1825c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1826c5bf121eSVineeth Remanan Pillai 		return 0;
1827c5bf121eSVineeth Remanan Pillai 	}
1828c5bf121eSVineeth Remanan Pillai 
1829c5bf121eSVineeth Remanan Pillai 	/*
1830c5bf121eSVineeth Remanan Pillai 	 * Fast cache lookup did not find it:
1831c5bf121eSVineeth Remanan Pillai 	 * bring it back from swap or allocate.
1832c5bf121eSVineeth Remanan Pillai 	 */
1833c5bf121eSVineeth Remanan Pillai 
1834cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1835cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1836cfda0526SMike Rapoport 		return 0;
1837cfda0526SMike Rapoport 	}
1838cfda0526SMike Rapoport 
1839800d8c63SKirill A. Shutemov 	/* shmem_symlink() */
1840800d8c63SKirill A. Shutemov 	if (mapping->a_ops != &shmem_aops)
1841800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1842657e3038SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1843800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1844800d8c63SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
1845800d8c63SKirill A. Shutemov 		goto alloc_huge;
1846800d8c63SKirill A. Shutemov 	switch (sbinfo->huge) {
1847800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
1848800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
184927d80fa2SKees Cook 	case SHMEM_HUGE_WITHIN_SIZE: {
185027d80fa2SKees Cook 		loff_t i_size;
185127d80fa2SKees Cook 		pgoff_t off;
185227d80fa2SKees Cook 
1853800d8c63SKirill A. Shutemov 		off = round_up(index, HPAGE_PMD_NR);
1854800d8c63SKirill A. Shutemov 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1855800d8c63SKirill A. Shutemov 		if (i_size >= HPAGE_PMD_SIZE &&
1856800d8c63SKirill A. Shutemov 		    i_size >> PAGE_SHIFT >= off)
1857800d8c63SKirill A. Shutemov 			goto alloc_huge;
185827d80fa2SKees Cook 
185927d80fa2SKees Cook 		fallthrough;
186027d80fa2SKees Cook 	}
1861800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
1862657e3038SKirill A. Shutemov 		if (sgp_huge == SGP_HUGE)
1863657e3038SKirill A. Shutemov 			goto alloc_huge;
1864657e3038SKirill A. Shutemov 		/* TODO: implement fadvise() hints */
1865800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
186659a16eadSHugh Dickins 	}
18671da177e4SLinus Torvalds 
1868800d8c63SKirill A. Shutemov alloc_huge:
18690f079694SMike Rapoport 	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1870800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1871c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1872c5bf121eSVineeth Remanan Pillai 		page = shmem_alloc_and_acct_page(gfp, inode,
1873800d8c63SKirill A. Shutemov 						 index, false);
187454af6042SHugh Dickins 	}
1875800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1876779750d2SKirill A. Shutemov 		int retry = 5;
1877c5bf121eSVineeth Remanan Pillai 
1878800d8c63SKirill A. Shutemov 		error = PTR_ERR(page);
1879800d8c63SKirill A. Shutemov 		page = NULL;
1880779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1881c5bf121eSVineeth Remanan Pillai 			goto unlock;
1882779750d2SKirill A. Shutemov 		/*
1883c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1884779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1885779750d2SKirill A. Shutemov 		 */
1886779750d2SKirill A. Shutemov 		while (retry--) {
1887779750d2SKirill A. Shutemov 			int ret;
1888c5bf121eSVineeth Remanan Pillai 
1889779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1890779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1891779750d2SKirill A. Shutemov 				break;
1892779750d2SKirill A. Shutemov 			if (ret)
1893779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1894779750d2SKirill A. Shutemov 		}
1895c5bf121eSVineeth Remanan Pillai 		goto unlock;
1896800d8c63SKirill A. Shutemov 	}
1897800d8c63SKirill A. Shutemov 
1898800d8c63SKirill A. Shutemov 	if (PageTransHuge(page))
1899800d8c63SKirill A. Shutemov 		hindex = round_down(index, HPAGE_PMD_NR);
1900800d8c63SKirill A. Shutemov 	else
1901800d8c63SKirill A. Shutemov 		hindex = index;
1902800d8c63SKirill A. Shutemov 
190366d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1904eb39d618SHugh Dickins 		__SetPageReferenced(page);
190566d2f4d2SHugh Dickins 
1906800d8c63SKirill A. Shutemov 	error = shmem_add_to_page_cache(page, mapping, hindex,
19073fea5a49SJohannes Weiner 					NULL, gfp & GFP_RECLAIM_MASK,
19083fea5a49SJohannes Weiner 					charge_mm);
19093fea5a49SJohannes Weiner 	if (error)
1910800d8c63SKirill A. Shutemov 		goto unacct;
19116058eaecSJohannes Weiner 	lru_cache_add(page);
191254af6042SHugh Dickins 
19134595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1914d8c6546bSMatthew Wilcox (Oracle) 	info->alloced += compound_nr(page);
1915800d8c63SKirill A. Shutemov 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
191654af6042SHugh Dickins 	shmem_recalc_inode(inode);
19174595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
19181635f6a7SHugh Dickins 	alloced = true;
191954af6042SHugh Dickins 
1920779750d2SKirill A. Shutemov 	if (PageTransHuge(page) &&
1921779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1922779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1923779750d2SKirill A. Shutemov 		/*
1924779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1925779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1926779750d2SKirill A. Shutemov 		 */
1927779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1928d041353dSCong Wang 		/*
1929d041353dSCong Wang 		 * _careful to defend against unlocked access to
1930d041353dSCong Wang 		 * ->shrink_list in shmem_unused_huge_shrink()
1931d041353dSCong Wang 		 */
1932d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1933779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1934779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1935779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1936779750d2SKirill A. Shutemov 		}
1937779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1938779750d2SKirill A. Shutemov 	}
1939779750d2SKirill A. Shutemov 
1940ec9516fbSHugh Dickins 	/*
19411635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19421635f6a7SHugh Dickins 	 */
19431635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19441635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19451635f6a7SHugh Dickins clear:
19461635f6a7SHugh Dickins 	/*
19471635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19481635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19491635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1950ec9516fbSHugh Dickins 	 */
1951800d8c63SKirill A. Shutemov 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1952800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
1953800d8c63SKirill A. Shutemov 		int i;
1954800d8c63SKirill A. Shutemov 
1955d8c6546bSMatthew Wilcox (Oracle) 		for (i = 0; i < compound_nr(head); i++) {
1956800d8c63SKirill A. Shutemov 			clear_highpage(head + i);
1957800d8c63SKirill A. Shutemov 			flush_dcache_page(head + i);
1958800d8c63SKirill A. Shutemov 		}
1959800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
1960ec9516fbSHugh Dickins 	}
1961bde05d1cSHugh Dickins 
196254af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
196375edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
196409cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1965267a4c76SHugh Dickins 		if (alloced) {
1966267a4c76SHugh Dickins 			ClearPageDirty(page);
1967267a4c76SHugh Dickins 			delete_from_page_cache(page);
19684595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1969267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
19704595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1971267a4c76SHugh Dickins 		}
197254af6042SHugh Dickins 		error = -EINVAL;
1973267a4c76SHugh Dickins 		goto unlock;
1974ff36b801SShaohua Li 	}
1975800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
197654af6042SHugh Dickins 	return 0;
1977d00806b1SNick Piggin 
1978d0217ac0SNick Piggin 	/*
197954af6042SHugh Dickins 	 * Error recovery.
19801da177e4SLinus Torvalds 	 */
198154af6042SHugh Dickins unacct:
1982d8c6546bSMatthew Wilcox (Oracle) 	shmem_inode_unacct_blocks(inode, compound_nr(page));
1983800d8c63SKirill A. Shutemov 
1984800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
1985800d8c63SKirill A. Shutemov 		unlock_page(page);
1986800d8c63SKirill A. Shutemov 		put_page(page);
1987800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1988800d8c63SKirill A. Shutemov 	}
1989d1899228SHugh Dickins unlock:
199027ab7006SHugh Dickins 	if (page) {
199154af6042SHugh Dickins 		unlock_page(page);
199209cbfeafSKirill A. Shutemov 		put_page(page);
199354af6042SHugh Dickins 	}
199454af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
19954595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
199654af6042SHugh Dickins 		shmem_recalc_inode(inode);
19974595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
19981da177e4SLinus Torvalds 		goto repeat;
1999d8dc74f2SAdrian Bunk 	}
20007f4446eeSMatthew Wilcox 	if (error == -EEXIST)
200154af6042SHugh Dickins 		goto repeat;
200254af6042SHugh Dickins 	return error;
20031da177e4SLinus Torvalds }
20041da177e4SLinus Torvalds 
200510d20bd2SLinus Torvalds /*
200610d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
200710d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
200810d20bd2SLinus Torvalds  * target.
200910d20bd2SLinus Torvalds  */
2010ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
201110d20bd2SLinus Torvalds {
201210d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
20132055da97SIngo Molnar 	list_del_init(&wait->entry);
201410d20bd2SLinus Torvalds 	return ret;
201510d20bd2SLinus Torvalds }
201610d20bd2SLinus Torvalds 
201720acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
20181da177e4SLinus Torvalds {
201911bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
2020496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
20219e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2022657e3038SKirill A. Shutemov 	enum sgp_type sgp;
202320acce67SSouptick Joarder 	int err;
202420acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
20251da177e4SLinus Torvalds 
2026f00cdc6dSHugh Dickins 	/*
2027f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
2028f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
2029f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
20308e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
20318e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
20328e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
20338e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
20348e205f77SHugh Dickins 	 *
20358e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
20368e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
20378e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
20388e205f77SHugh Dickins 	 *
20398e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20408e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
20418e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2042f00cdc6dSHugh Dickins 	 */
2043f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2044f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2045f00cdc6dSHugh Dickins 
2046f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2047f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20488e205f77SHugh Dickins 		if (shmem_falloc &&
20498e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20508e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20518e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20528897c1b1SKirill A. Shutemov 			struct file *fpin;
20538e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
205410d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20558e205f77SHugh Dickins 
20568e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
20578897c1b1SKirill A. Shutemov 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
20588897c1b1SKirill A. Shutemov 			if (fpin)
20598e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
20608e205f77SHugh Dickins 
20618e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20628e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20638e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20648e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20658e205f77SHugh Dickins 			schedule();
20668e205f77SHugh Dickins 
20678e205f77SHugh Dickins 			/*
20688e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20698e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
20708e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
20718e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
20728e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
20738e205f77SHugh Dickins 			 */
20748e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
20758e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
20768e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20778897c1b1SKirill A. Shutemov 
20788897c1b1SKirill A. Shutemov 			if (fpin)
20798897c1b1SKirill A. Shutemov 				fput(fpin);
20808e205f77SHugh Dickins 			return ret;
2081f00cdc6dSHugh Dickins 		}
20828e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2083f00cdc6dSHugh Dickins 	}
2084f00cdc6dSHugh Dickins 
2085657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
208618600332SMichal Hocko 
208718600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
208818600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2089657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
209018600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
209118600332SMichal Hocko 		sgp = SGP_HUGE;
2092657e3038SKirill A. Shutemov 
209320acce67SSouptick Joarder 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2094cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
209520acce67SSouptick Joarder 	if (err)
209620acce67SSouptick Joarder 		return vmf_error(err);
209768da9f05SHugh Dickins 	return ret;
20981da177e4SLinus Torvalds }
20991da177e4SLinus Torvalds 
2100c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2101c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2102c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2103c01d5b30SHugh Dickins {
2104c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2105c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2106c01d5b30SHugh Dickins 	unsigned long addr;
2107c01d5b30SHugh Dickins 	unsigned long offset;
2108c01d5b30SHugh Dickins 	unsigned long inflated_len;
2109c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2110c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2111c01d5b30SHugh Dickins 
2112c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2113c01d5b30SHugh Dickins 		return -ENOMEM;
2114c01d5b30SHugh Dickins 
2115c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2116c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2117c01d5b30SHugh Dickins 
2118396bcc52SMatthew Wilcox (Oracle) 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2119c01d5b30SHugh Dickins 		return addr;
2120c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2121c01d5b30SHugh Dickins 		return addr;
2122c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2123c01d5b30SHugh Dickins 		return addr;
2124c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2125c01d5b30SHugh Dickins 		return addr;
2126c01d5b30SHugh Dickins 
2127c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2128c01d5b30SHugh Dickins 		return addr;
2129c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2130c01d5b30SHugh Dickins 		return addr;
2131c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2132c01d5b30SHugh Dickins 		return addr;
2133c01d5b30SHugh Dickins 	/*
2134c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2135c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
213699158997SKirill A. Shutemov 	 * But if caller specified an address hint and we allocated area there
213799158997SKirill A. Shutemov 	 * successfully, respect that as before.
2138c01d5b30SHugh Dickins 	 */
213999158997SKirill A. Shutemov 	if (uaddr == addr)
2140c01d5b30SHugh Dickins 		return addr;
2141c01d5b30SHugh Dickins 
2142c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2143c01d5b30SHugh Dickins 		struct super_block *sb;
2144c01d5b30SHugh Dickins 
2145c01d5b30SHugh Dickins 		if (file) {
2146c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2147c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2148c01d5b30SHugh Dickins 		} else {
2149c01d5b30SHugh Dickins 			/*
2150c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2151c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2152c01d5b30SHugh Dickins 			 */
2153c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2154c01d5b30SHugh Dickins 				return addr;
2155c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2156c01d5b30SHugh Dickins 		}
21573089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2158c01d5b30SHugh Dickins 			return addr;
2159c01d5b30SHugh Dickins 	}
2160c01d5b30SHugh Dickins 
2161c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2162c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2163c01d5b30SHugh Dickins 		return addr;
2164c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2165c01d5b30SHugh Dickins 		return addr;
2166c01d5b30SHugh Dickins 
2167c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2168c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2169c01d5b30SHugh Dickins 		return addr;
2170c01d5b30SHugh Dickins 	if (inflated_len < len)
2171c01d5b30SHugh Dickins 		return addr;
2172c01d5b30SHugh Dickins 
217399158997SKirill A. Shutemov 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2174c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2175c01d5b30SHugh Dickins 		return addr;
2176c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2177c01d5b30SHugh Dickins 		return addr;
2178c01d5b30SHugh Dickins 
2179c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2180c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2181c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2182c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2183c01d5b30SHugh Dickins 
2184c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2185c01d5b30SHugh Dickins 		return addr;
2186c01d5b30SHugh Dickins 	return inflated_addr;
2187c01d5b30SHugh Dickins }
2188c01d5b30SHugh Dickins 
21891da177e4SLinus Torvalds #ifdef CONFIG_NUMA
219041ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
21911da177e4SLinus Torvalds {
2192496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
219341ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
21941da177e4SLinus Torvalds }
21951da177e4SLinus Torvalds 
2196d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2197d8dc74f2SAdrian Bunk 					  unsigned long addr)
21981da177e4SLinus Torvalds {
2199496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
220041ffe5d5SHugh Dickins 	pgoff_t index;
22011da177e4SLinus Torvalds 
220241ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
220341ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
22041da177e4SLinus Torvalds }
22051da177e4SLinus Torvalds #endif
22061da177e4SLinus Torvalds 
22071da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
22081da177e4SLinus Torvalds {
2209496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
22101da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
22111da177e4SLinus Torvalds 	int retval = -ENOMEM;
22121da177e4SLinus Torvalds 
2213ea0dfeb4SHugh Dickins 	/*
2214ea0dfeb4SHugh Dickins 	 * What serializes the accesses to info->flags?
2215ea0dfeb4SHugh Dickins 	 * ipc_lock_object() when called from shmctl_do_lock(),
2216ea0dfeb4SHugh Dickins 	 * no serialization needed when called from shm_destroy().
2217ea0dfeb4SHugh Dickins 	 */
22181da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
22191da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
22201da177e4SLinus Torvalds 			goto out_nomem;
22211da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
222289e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
22231da177e4SLinus Torvalds 	}
22241da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
22251da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
22261da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
222789e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
22281da177e4SLinus Torvalds 	}
22291da177e4SLinus Torvalds 	retval = 0;
223089e004eaSLee Schermerhorn 
22311da177e4SLinus Torvalds out_nomem:
22321da177e4SLinus Torvalds 	return retval;
22331da177e4SLinus Torvalds }
22341da177e4SLinus Torvalds 
22359b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
22361da177e4SLinus Torvalds {
2237ab3948f5SJoel Fernandes (Google) 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2238ab3948f5SJoel Fernandes (Google) 
2239ab3948f5SJoel Fernandes (Google) 	if (info->seals & F_SEAL_FUTURE_WRITE) {
2240ab3948f5SJoel Fernandes (Google) 		/*
2241ab3948f5SJoel Fernandes (Google) 		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2242ab3948f5SJoel Fernandes (Google) 		 * "future write" seal active.
2243ab3948f5SJoel Fernandes (Google) 		 */
2244ab3948f5SJoel Fernandes (Google) 		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2245ab3948f5SJoel Fernandes (Google) 			return -EPERM;
2246ab3948f5SJoel Fernandes (Google) 
2247ab3948f5SJoel Fernandes (Google) 		/*
224805d35110SNicolas Geoffray 		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
224905d35110SNicolas Geoffray 		 * MAP_SHARED and read-only, take care to not allow mprotect to
225005d35110SNicolas Geoffray 		 * revert protections on such mappings. Do this only for shared
225105d35110SNicolas Geoffray 		 * mappings. For private mappings, don't need to mask
225205d35110SNicolas Geoffray 		 * VM_MAYWRITE as we still want them to be COW-writable.
2253ab3948f5SJoel Fernandes (Google) 		 */
225405d35110SNicolas Geoffray 		if (vma->vm_flags & VM_SHARED)
2255ab3948f5SJoel Fernandes (Google) 			vma->vm_flags &= ~(VM_MAYWRITE);
2256ab3948f5SJoel Fernandes (Google) 	}
2257ab3948f5SJoel Fernandes (Google) 
22581da177e4SLinus Torvalds 	file_accessed(file);
22591da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2260396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2261f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2262f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2263f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2264f3f0e1d2SKirill A. Shutemov 	}
22651da177e4SLinus Torvalds 	return 0;
22661da177e4SLinus Torvalds }
22671da177e4SLinus Torvalds 
2268454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
226909208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22701da177e4SLinus Torvalds {
22711da177e4SLinus Torvalds 	struct inode *inode;
22721da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22731da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2274*e809d5f0SChris Down 	ino_t ino;
22751da177e4SLinus Torvalds 
2276*e809d5f0SChris Down 	if (shmem_reserve_inode(sb, &ino))
22771da177e4SLinus Torvalds 		return NULL;
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds 	inode = new_inode(sb);
22801da177e4SLinus Torvalds 	if (inode) {
2281*e809d5f0SChris Down 		inode->i_ino = ino;
2282454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
22831da177e4SLinus Torvalds 		inode->i_blocks = 0;
2284078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
228546c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
22861da177e4SLinus Torvalds 		info = SHMEM_I(inode);
22871da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
22881da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
2289af53d3e9SHugh Dickins 		atomic_set(&info->stop_eviction, 0);
229040e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
22910b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2292779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
22931da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
229438f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
229572c04902SAl Viro 		cache_no_acl(inode);
22961da177e4SLinus Torvalds 
22971da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
22981da177e4SLinus Torvalds 		default:
229939f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
23001da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
23011da177e4SLinus Torvalds 			break;
23021da177e4SLinus Torvalds 		case S_IFREG:
230314fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
23041da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
23051da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
230671fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
230771fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
23081da177e4SLinus Torvalds 			break;
23091da177e4SLinus Torvalds 		case S_IFDIR:
2310d8c76e6fSDave Hansen 			inc_nlink(inode);
23111da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
23121da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
23131da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
23141da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
23151da177e4SLinus Torvalds 			break;
23161da177e4SLinus Torvalds 		case S_IFLNK:
23171da177e4SLinus Torvalds 			/*
23181da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
23191da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
23201da177e4SLinus Torvalds 			 */
232171fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
23221da177e4SLinus Torvalds 			break;
23231da177e4SLinus Torvalds 		}
2324b45d71fbSJoel Fernandes (Google) 
2325b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
23265b04c689SPavel Emelyanov 	} else
23275b04c689SPavel Emelyanov 		shmem_free_inode(sb);
23281da177e4SLinus Torvalds 	return inode;
23291da177e4SLinus Torvalds }
23301da177e4SLinus Torvalds 
23310cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping)
23320cd6144aSJohannes Weiner {
2333f8005451SHugh Dickins 	return mapping->a_ops == &shmem_aops;
23340cd6144aSJohannes Weiner }
23350cd6144aSJohannes Weiner 
23368d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
23374c27fe4cSMike Rapoport 				  pmd_t *dst_pmd,
23384c27fe4cSMike Rapoport 				  struct vm_area_struct *dst_vma,
23394c27fe4cSMike Rapoport 				  unsigned long dst_addr,
23404c27fe4cSMike Rapoport 				  unsigned long src_addr,
23418d103963SMike Rapoport 				  bool zeropage,
23424c27fe4cSMike Rapoport 				  struct page **pagep)
23434c27fe4cSMike Rapoport {
23444c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
23454c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
23464c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
23474c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
23484c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
23494c27fe4cSMike Rapoport 	spinlock_t *ptl;
23504c27fe4cSMike Rapoport 	void *page_kaddr;
23514c27fe4cSMike Rapoport 	struct page *page;
23524c27fe4cSMike Rapoport 	pte_t _dst_pte, *dst_pte;
23534c27fe4cSMike Rapoport 	int ret;
2354e2a50c1fSAndrea Arcangeli 	pgoff_t offset, max_off;
23554c27fe4cSMike Rapoport 
23564c27fe4cSMike Rapoport 	ret = -ENOMEM;
23570f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, 1))
23584c27fe4cSMike Rapoport 		goto out;
23594c27fe4cSMike Rapoport 
2360cb658a45SAndrea Arcangeli 	if (!*pagep) {
23614c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
23624c27fe4cSMike Rapoport 		if (!page)
23630f079694SMike Rapoport 			goto out_unacct_blocks;
23644c27fe4cSMike Rapoport 
23658d103963SMike Rapoport 		if (!zeropage) {	/* mcopy_atomic */
23664c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23678d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23688d103963SMike Rapoport 					     (const void __user *)src_addr,
23694c27fe4cSMike Rapoport 					     PAGE_SIZE);
23704c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23714c27fe4cSMike Rapoport 
2372c1e8d7c6SMichel Lespinasse 			/* fallback to copy_from_user outside mmap_lock */
23734c27fe4cSMike Rapoport 			if (unlikely(ret)) {
23744c27fe4cSMike Rapoport 				*pagep = page;
23750f079694SMike Rapoport 				shmem_inode_unacct_blocks(inode, 1);
23764c27fe4cSMike Rapoport 				/* don't free the page */
23779e368259SAndrea Arcangeli 				return -ENOENT;
23784c27fe4cSMike Rapoport 			}
23798d103963SMike Rapoport 		} else {		/* mfill_zeropage_atomic */
23808d103963SMike Rapoport 			clear_highpage(page);
23818d103963SMike Rapoport 		}
23824c27fe4cSMike Rapoport 	} else {
23834c27fe4cSMike Rapoport 		page = *pagep;
23844c27fe4cSMike Rapoport 		*pagep = NULL;
23854c27fe4cSMike Rapoport 	}
23864c27fe4cSMike Rapoport 
23879cc90c66SAndrea Arcangeli 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
23889cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
23899cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2390a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
23919cc90c66SAndrea Arcangeli 
2392e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2393e2a50c1fSAndrea Arcangeli 	offset = linear_page_index(dst_vma, dst_addr);
2394e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2395e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
2396e2a50c1fSAndrea Arcangeli 		goto out_release;
2397e2a50c1fSAndrea Arcangeli 
23983fea5a49SJohannes Weiner 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
23993fea5a49SJohannes Weiner 				      gfp & GFP_RECLAIM_MASK, dst_mm);
24004c27fe4cSMike Rapoport 	if (ret)
24014c27fe4cSMike Rapoport 		goto out_release;
24024c27fe4cSMike Rapoport 
24034c27fe4cSMike Rapoport 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
24044c27fe4cSMike Rapoport 	if (dst_vma->vm_flags & VM_WRITE)
24054c27fe4cSMike Rapoport 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2406dcf7fe9dSAndrea Arcangeli 	else {
2407dcf7fe9dSAndrea Arcangeli 		/*
2408dcf7fe9dSAndrea Arcangeli 		 * We don't set the pte dirty if the vma has no
2409dcf7fe9dSAndrea Arcangeli 		 * VM_WRITE permission, so mark the page dirty or it
2410dcf7fe9dSAndrea Arcangeli 		 * could be freed from under us. We could do it
2411dcf7fe9dSAndrea Arcangeli 		 * unconditionally before unlock_page(), but doing it
2412dcf7fe9dSAndrea Arcangeli 		 * only if VM_WRITE is not set is faster.
2413dcf7fe9dSAndrea Arcangeli 		 */
2414dcf7fe9dSAndrea Arcangeli 		set_page_dirty(page);
2415dcf7fe9dSAndrea Arcangeli 	}
24164c27fe4cSMike Rapoport 
24174c27fe4cSMike Rapoport 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2418e2a50c1fSAndrea Arcangeli 
2419e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2420e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2421e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
24223fea5a49SJohannes Weiner 		goto out_release_unlock;
2423e2a50c1fSAndrea Arcangeli 
2424e2a50c1fSAndrea Arcangeli 	ret = -EEXIST;
24254c27fe4cSMike Rapoport 	if (!pte_none(*dst_pte))
24263fea5a49SJohannes Weiner 		goto out_release_unlock;
24274c27fe4cSMike Rapoport 
24286058eaecSJohannes Weiner 	lru_cache_add(page);
24294c27fe4cSMike Rapoport 
243094b7cc01SYang Shi 	spin_lock_irq(&info->lock);
24314c27fe4cSMike Rapoport 	info->alloced++;
24324c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
24334c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
243494b7cc01SYang Shi 	spin_unlock_irq(&info->lock);
24354c27fe4cSMike Rapoport 
24364c27fe4cSMike Rapoport 	inc_mm_counter(dst_mm, mm_counter_file(page));
24374c27fe4cSMike Rapoport 	page_add_file_rmap(page, false);
24384c27fe4cSMike Rapoport 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
24394c27fe4cSMike Rapoport 
24404c27fe4cSMike Rapoport 	/* No need to invalidate - it was non-present before */
24414c27fe4cSMike Rapoport 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
24424c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2443e2a50c1fSAndrea Arcangeli 	unlock_page(page);
24444c27fe4cSMike Rapoport 	ret = 0;
24454c27fe4cSMike Rapoport out:
24464c27fe4cSMike Rapoport 	return ret;
24473fea5a49SJohannes Weiner out_release_unlock:
24484c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2449dcf7fe9dSAndrea Arcangeli 	ClearPageDirty(page);
2450e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
24514c27fe4cSMike Rapoport out_release:
24529cc90c66SAndrea Arcangeli 	unlock_page(page);
24534c27fe4cSMike Rapoport 	put_page(page);
24544c27fe4cSMike Rapoport out_unacct_blocks:
24550f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
24564c27fe4cSMike Rapoport 	goto out;
24574c27fe4cSMike Rapoport }
24584c27fe4cSMike Rapoport 
24598d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
24608d103963SMike Rapoport 			   pmd_t *dst_pmd,
24618d103963SMike Rapoport 			   struct vm_area_struct *dst_vma,
24628d103963SMike Rapoport 			   unsigned long dst_addr,
24638d103963SMike Rapoport 			   unsigned long src_addr,
24648d103963SMike Rapoport 			   struct page **pagep)
24658d103963SMike Rapoport {
24668d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24678d103963SMike Rapoport 				      dst_addr, src_addr, false, pagep);
24688d103963SMike Rapoport }
24698d103963SMike Rapoport 
24708d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
24718d103963SMike Rapoport 			     pmd_t *dst_pmd,
24728d103963SMike Rapoport 			     struct vm_area_struct *dst_vma,
24738d103963SMike Rapoport 			     unsigned long dst_addr)
24748d103963SMike Rapoport {
24758d103963SMike Rapoport 	struct page *page = NULL;
24768d103963SMike Rapoport 
24778d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24788d103963SMike Rapoport 				      dst_addr, 0, true, &page);
24798d103963SMike Rapoport }
24808d103963SMike Rapoport 
24811da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
248292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
248369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24841da177e4SLinus Torvalds 
24856d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
24866d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
24876d9d88d0SJarkko Sakkinen #else
24886d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
24896d9d88d0SJarkko Sakkinen #endif
24906d9d88d0SJarkko Sakkinen 
24911da177e4SLinus Torvalds static int
2492800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2493800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2494800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24951da177e4SLinus Torvalds {
2496800d15a5SNick Piggin 	struct inode *inode = mapping->host;
249740e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
249809cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
249940e041a2SDavid Herrmann 
250040e041a2SDavid Herrmann 	/* i_mutex is held by caller */
2501ab3948f5SJoel Fernandes (Google) 	if (unlikely(info->seals & (F_SEAL_GROW |
2502ab3948f5SJoel Fernandes (Google) 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2503ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
250440e041a2SDavid Herrmann 			return -EPERM;
250540e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
250640e041a2SDavid Herrmann 			return -EPERM;
250740e041a2SDavid Herrmann 	}
250840e041a2SDavid Herrmann 
25099e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2510800d15a5SNick Piggin }
2511800d15a5SNick Piggin 
2512800d15a5SNick Piggin static int
2513800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2514800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2515800d15a5SNick Piggin 			struct page *page, void *fsdata)
2516800d15a5SNick Piggin {
2517800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2518800d15a5SNick Piggin 
2519800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2520800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2521800d15a5SNick Piggin 
2522ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2523800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2524800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2525800d8c63SKirill A. Shutemov 			int i;
2526800d8c63SKirill A. Shutemov 
2527800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2528800d8c63SKirill A. Shutemov 				if (head + i == page)
2529800d8c63SKirill A. Shutemov 					continue;
2530800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2531800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2532800d8c63SKirill A. Shutemov 			}
2533800d8c63SKirill A. Shutemov 		}
253409cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
253509cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2536ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
253709cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2538ec9516fbSHugh Dickins 		}
2539800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2540ec9516fbSHugh Dickins 	}
2541d3602444SHugh Dickins 	set_page_dirty(page);
25426746aff7SWu Fengguang 	unlock_page(page);
254309cbfeafSKirill A. Shutemov 	put_page(page);
2544d3602444SHugh Dickins 
2545800d15a5SNick Piggin 	return copied;
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
25482ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
25491da177e4SLinus Torvalds {
25506e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
25516e58e79dSAl Viro 	struct inode *inode = file_inode(file);
25521da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
255341ffe5d5SHugh Dickins 	pgoff_t index;
255441ffe5d5SHugh Dickins 	unsigned long offset;
2555a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2556f7c1d074SGeert Uytterhoeven 	int error = 0;
2557cb66a7a1SAl Viro 	ssize_t retval = 0;
25586e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2559a0ee5ec5SHugh Dickins 
2560a0ee5ec5SHugh Dickins 	/*
2561a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2562a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2563a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2564a0ee5ec5SHugh Dickins 	 */
2565777eda2cSAl Viro 	if (!iter_is_iovec(to))
256675edd345SHugh Dickins 		sgp = SGP_CACHE;
25671da177e4SLinus Torvalds 
256809cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
256909cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25701da177e4SLinus Torvalds 
25711da177e4SLinus Torvalds 	for (;;) {
25721da177e4SLinus Torvalds 		struct page *page = NULL;
257341ffe5d5SHugh Dickins 		pgoff_t end_index;
257441ffe5d5SHugh Dickins 		unsigned long nr, ret;
25751da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25761da177e4SLinus Torvalds 
257709cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25781da177e4SLinus Torvalds 		if (index > end_index)
25791da177e4SLinus Torvalds 			break;
25801da177e4SLinus Torvalds 		if (index == end_index) {
258109cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25821da177e4SLinus Torvalds 			if (nr <= offset)
25831da177e4SLinus Torvalds 				break;
25841da177e4SLinus Torvalds 		}
25851da177e4SLinus Torvalds 
25869e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
25876e58e79dSAl Viro 		if (error) {
25886e58e79dSAl Viro 			if (error == -EINVAL)
25896e58e79dSAl Viro 				error = 0;
25901da177e4SLinus Torvalds 			break;
25911da177e4SLinus Torvalds 		}
259275edd345SHugh Dickins 		if (page) {
259375edd345SHugh Dickins 			if (sgp == SGP_CACHE)
259475edd345SHugh Dickins 				set_page_dirty(page);
2595d3602444SHugh Dickins 			unlock_page(page);
259675edd345SHugh Dickins 		}
25971da177e4SLinus Torvalds 
25981da177e4SLinus Torvalds 		/*
25991da177e4SLinus Torvalds 		 * We must evaluate after, since reads (unlike writes)
26001b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate
26011da177e4SLinus Torvalds 		 */
260209cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
26031da177e4SLinus Torvalds 		i_size = i_size_read(inode);
260409cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
26051da177e4SLinus Torvalds 		if (index == end_index) {
260609cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
26071da177e4SLinus Torvalds 			if (nr <= offset) {
26081da177e4SLinus Torvalds 				if (page)
260909cbfeafSKirill A. Shutemov 					put_page(page);
26101da177e4SLinus Torvalds 				break;
26111da177e4SLinus Torvalds 			}
26121da177e4SLinus Torvalds 		}
26131da177e4SLinus Torvalds 		nr -= offset;
26141da177e4SLinus Torvalds 
26151da177e4SLinus Torvalds 		if (page) {
26161da177e4SLinus Torvalds 			/*
26171da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
26181da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
26191da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
26201da177e4SLinus Torvalds 			 */
26211da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
26221da177e4SLinus Torvalds 				flush_dcache_page(page);
26231da177e4SLinus Torvalds 			/*
26241da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
26251da177e4SLinus Torvalds 			 */
26261da177e4SLinus Torvalds 			if (!offset)
26271da177e4SLinus Torvalds 				mark_page_accessed(page);
2628b5810039SNick Piggin 		} else {
26291da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
263009cbfeafSKirill A. Shutemov 			get_page(page);
2631b5810039SNick Piggin 		}
26321da177e4SLinus Torvalds 
26331da177e4SLinus Torvalds 		/*
26341da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
26351da177e4SLinus Torvalds 		 * now we can copy it to user space...
26361da177e4SLinus Torvalds 		 */
26372ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
26386e58e79dSAl Viro 		retval += ret;
26391da177e4SLinus Torvalds 		offset += ret;
264009cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
264109cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
26421da177e4SLinus Torvalds 
264309cbfeafSKirill A. Shutemov 		put_page(page);
26442ba5bbedSAl Viro 		if (!iov_iter_count(to))
26451da177e4SLinus Torvalds 			break;
26466e58e79dSAl Viro 		if (ret < nr) {
26476e58e79dSAl Viro 			error = -EFAULT;
26486e58e79dSAl Viro 			break;
26496e58e79dSAl Viro 		}
26501da177e4SLinus Torvalds 		cond_resched();
26511da177e4SLinus Torvalds 	}
26521da177e4SLinus Torvalds 
265309cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
26546e58e79dSAl Viro 	file_accessed(file);
26556e58e79dSAl Viro 	return retval ? retval : error;
26561da177e4SLinus Torvalds }
26571da177e4SLinus Torvalds 
2658220f2ac9SHugh Dickins /*
26597f4446eeSMatthew Wilcox  * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2660220f2ac9SHugh Dickins  */
2661220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2662965c8e59SAndrew Morton 				    pgoff_t index, pgoff_t end, int whence)
2663220f2ac9SHugh Dickins {
2664220f2ac9SHugh Dickins 	struct page *page;
2665220f2ac9SHugh Dickins 	struct pagevec pvec;
2666220f2ac9SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
2667220f2ac9SHugh Dickins 	bool done = false;
2668220f2ac9SHugh Dickins 	int i;
2669220f2ac9SHugh Dickins 
267086679820SMel Gorman 	pagevec_init(&pvec);
2671220f2ac9SHugh Dickins 	pvec.nr = 1;		/* start small: we may be there already */
2672220f2ac9SHugh Dickins 	while (!done) {
26730cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
2674220f2ac9SHugh Dickins 					pvec.nr, pvec.pages, indices);
2675220f2ac9SHugh Dickins 		if (!pvec.nr) {
2676965c8e59SAndrew Morton 			if (whence == SEEK_DATA)
2677220f2ac9SHugh Dickins 				index = end;
2678220f2ac9SHugh Dickins 			break;
2679220f2ac9SHugh Dickins 		}
2680220f2ac9SHugh Dickins 		for (i = 0; i < pvec.nr; i++, index++) {
2681220f2ac9SHugh Dickins 			if (index < indices[i]) {
2682965c8e59SAndrew Morton 				if (whence == SEEK_HOLE) {
2683220f2ac9SHugh Dickins 					done = true;
2684220f2ac9SHugh Dickins 					break;
2685220f2ac9SHugh Dickins 				}
2686220f2ac9SHugh Dickins 				index = indices[i];
2687220f2ac9SHugh Dickins 			}
2688220f2ac9SHugh Dickins 			page = pvec.pages[i];
26893159f943SMatthew Wilcox 			if (page && !xa_is_value(page)) {
2690220f2ac9SHugh Dickins 				if (!PageUptodate(page))
2691220f2ac9SHugh Dickins 					page = NULL;
2692220f2ac9SHugh Dickins 			}
2693220f2ac9SHugh Dickins 			if (index >= end ||
2694965c8e59SAndrew Morton 			    (page && whence == SEEK_DATA) ||
2695965c8e59SAndrew Morton 			    (!page && whence == SEEK_HOLE)) {
2696220f2ac9SHugh Dickins 				done = true;
2697220f2ac9SHugh Dickins 				break;
2698220f2ac9SHugh Dickins 			}
2699220f2ac9SHugh Dickins 		}
27000cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
2701220f2ac9SHugh Dickins 		pagevec_release(&pvec);
2702220f2ac9SHugh Dickins 		pvec.nr = PAGEVEC_SIZE;
2703220f2ac9SHugh Dickins 		cond_resched();
2704220f2ac9SHugh Dickins 	}
2705220f2ac9SHugh Dickins 	return index;
2706220f2ac9SHugh Dickins }
2707220f2ac9SHugh Dickins 
2708965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2709220f2ac9SHugh Dickins {
2710220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2711220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2712220f2ac9SHugh Dickins 	pgoff_t start, end;
2713220f2ac9SHugh Dickins 	loff_t new_offset;
2714220f2ac9SHugh Dickins 
2715965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2716965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2717220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
27185955102cSAl Viro 	inode_lock(inode);
2719220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
2720220f2ac9SHugh Dickins 
27211a413646SYufen Yu 	if (offset < 0 || offset >= inode->i_size)
2722220f2ac9SHugh Dickins 		offset = -ENXIO;
2723220f2ac9SHugh Dickins 	else {
272409cbfeafSKirill A. Shutemov 		start = offset >> PAGE_SHIFT;
272509cbfeafSKirill A. Shutemov 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2726965c8e59SAndrew Morton 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
272709cbfeafSKirill A. Shutemov 		new_offset <<= PAGE_SHIFT;
2728220f2ac9SHugh Dickins 		if (new_offset > offset) {
2729220f2ac9SHugh Dickins 			if (new_offset < inode->i_size)
2730220f2ac9SHugh Dickins 				offset = new_offset;
2731965c8e59SAndrew Morton 			else if (whence == SEEK_DATA)
2732220f2ac9SHugh Dickins 				offset = -ENXIO;
2733220f2ac9SHugh Dickins 			else
2734220f2ac9SHugh Dickins 				offset = inode->i_size;
2735220f2ac9SHugh Dickins 		}
2736220f2ac9SHugh Dickins 	}
2737220f2ac9SHugh Dickins 
2738387aae6fSHugh Dickins 	if (offset >= 0)
273946a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
27405955102cSAl Viro 	inode_unlock(inode);
2741220f2ac9SHugh Dickins 	return offset;
2742220f2ac9SHugh Dickins }
2743220f2ac9SHugh Dickins 
274483e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
274583e4fa9cSHugh Dickins 							 loff_t len)
274683e4fa9cSHugh Dickins {
2747496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2748e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
274940e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
27501aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2751e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2752e2d12e22SHugh Dickins 	int error;
275383e4fa9cSHugh Dickins 
275413ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
275513ace4d0SHugh Dickins 		return -EOPNOTSUPP;
275613ace4d0SHugh Dickins 
27575955102cSAl Viro 	inode_lock(inode);
275883e4fa9cSHugh Dickins 
275983e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
276083e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
276183e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
276283e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
27638e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
276483e4fa9cSHugh Dickins 
276540e041a2SDavid Herrmann 		/* protected by i_mutex */
2766ab3948f5SJoel Fernandes (Google) 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
276740e041a2SDavid Herrmann 			error = -EPERM;
276840e041a2SDavid Herrmann 			goto out;
276940e041a2SDavid Herrmann 		}
277040e041a2SDavid Herrmann 
27718e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2772aa71ecd8SChen Jun 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2773f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2774f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2775f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2776f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2777f00cdc6dSHugh Dickins 
277883e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
277983e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
278083e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
278183e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
278283e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
27838e205f77SHugh Dickins 
27848e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
27858e205f77SHugh Dickins 		inode->i_private = NULL;
27868e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
27872055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
27888e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
278983e4fa9cSHugh Dickins 		error = 0;
27908e205f77SHugh Dickins 		goto out;
279183e4fa9cSHugh Dickins 	}
279283e4fa9cSHugh Dickins 
2793e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2794e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2795e2d12e22SHugh Dickins 	if (error)
2796e2d12e22SHugh Dickins 		goto out;
2797e2d12e22SHugh Dickins 
279840e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
279940e041a2SDavid Herrmann 		error = -EPERM;
280040e041a2SDavid Herrmann 		goto out;
280140e041a2SDavid Herrmann 	}
280240e041a2SDavid Herrmann 
280309cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
280409cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2805e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2806e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2807e2d12e22SHugh Dickins 		error = -ENOSPC;
2808e2d12e22SHugh Dickins 		goto out;
2809e2d12e22SHugh Dickins 	}
2810e2d12e22SHugh Dickins 
28118e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
28121aac1400SHugh Dickins 	shmem_falloc.start = start;
28131aac1400SHugh Dickins 	shmem_falloc.next  = start;
28141aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
28151aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
28161aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28171aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
28181aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
28191aac1400SHugh Dickins 
2820e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2821e2d12e22SHugh Dickins 		struct page *page;
2822e2d12e22SHugh Dickins 
2823e2d12e22SHugh Dickins 		/*
2824e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2825e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2826e2d12e22SHugh Dickins 		 */
2827e2d12e22SHugh Dickins 		if (signal_pending(current))
2828e2d12e22SHugh Dickins 			error = -EINTR;
28291aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
28301aac1400SHugh Dickins 			error = -ENOMEM;
2831e2d12e22SHugh Dickins 		else
28329e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2833e2d12e22SHugh Dickins 		if (error) {
28341635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
28357f556567SHugh Dickins 			if (index > start) {
28361635f6a7SHugh Dickins 				shmem_undo_range(inode,
283709cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2838b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
28397f556567SHugh Dickins 			}
28401aac1400SHugh Dickins 			goto undone;
2841e2d12e22SHugh Dickins 		}
2842e2d12e22SHugh Dickins 
2843e2d12e22SHugh Dickins 		/*
28441aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
28451aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
28461aac1400SHugh Dickins 		 */
28471aac1400SHugh Dickins 		shmem_falloc.next++;
28481aac1400SHugh Dickins 		if (!PageUptodate(page))
28491aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
28501aac1400SHugh Dickins 
28511aac1400SHugh Dickins 		/*
28521635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
28531635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
28541635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2855e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2856e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2857e2d12e22SHugh Dickins 		 */
2858e2d12e22SHugh Dickins 		set_page_dirty(page);
2859e2d12e22SHugh Dickins 		unlock_page(page);
286009cbfeafSKirill A. Shutemov 		put_page(page);
2861e2d12e22SHugh Dickins 		cond_resched();
2862e2d12e22SHugh Dickins 	}
2863e2d12e22SHugh Dickins 
2864e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2865e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2866078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
28671aac1400SHugh Dickins undone:
28681aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28691aac1400SHugh Dickins 	inode->i_private = NULL;
28701aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2871e2d12e22SHugh Dickins out:
28725955102cSAl Viro 	inode_unlock(inode);
287383e4fa9cSHugh Dickins 	return error;
287483e4fa9cSHugh Dickins }
287583e4fa9cSHugh Dickins 
2876726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
28771da177e4SLinus Torvalds {
2878726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
28791da177e4SLinus Torvalds 
28801da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
288109cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28821da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28830edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28841da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
288541ffe5d5SHugh Dickins 		buf->f_bavail =
288641ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
288741ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
28880edd73b3SHugh Dickins 	}
28890edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
28901da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
28911da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28921da177e4SLinus Torvalds 	}
28931da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
28941da177e4SLinus Torvalds 	return 0;
28951da177e4SLinus Torvalds }
28961da177e4SLinus Torvalds 
28971da177e4SLinus Torvalds /*
28981da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
28991da177e4SLinus Torvalds  */
29001da177e4SLinus Torvalds static int
29011a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
29021da177e4SLinus Torvalds {
29030b0a0806SHugh Dickins 	struct inode *inode;
29041da177e4SLinus Torvalds 	int error = -ENOSPC;
29051da177e4SLinus Torvalds 
2906454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
29071da177e4SLinus Torvalds 	if (inode) {
2908feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2909feda821eSChristoph Hellwig 		if (error)
2910feda821eSChristoph Hellwig 			goto out_iput;
29112a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
29129d8f13baSMimi Zohar 						     &dentry->d_name,
29136d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2914feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2915feda821eSChristoph Hellwig 			goto out_iput;
291637ec43cdSMimi Zohar 
2917718deb6bSAl Viro 		error = 0;
29181da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2919078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
29201da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
29211da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
29221da177e4SLinus Torvalds 	}
29231da177e4SLinus Torvalds 	return error;
2924feda821eSChristoph Hellwig out_iput:
2925feda821eSChristoph Hellwig 	iput(inode);
2926feda821eSChristoph Hellwig 	return error;
29271da177e4SLinus Torvalds }
29281da177e4SLinus Torvalds 
292960545d0dSAl Viro static int
293060545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
293160545d0dSAl Viro {
293260545d0dSAl Viro 	struct inode *inode;
293360545d0dSAl Viro 	int error = -ENOSPC;
293460545d0dSAl Viro 
293560545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
293660545d0dSAl Viro 	if (inode) {
293760545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
293860545d0dSAl Viro 						     NULL,
293960545d0dSAl Viro 						     shmem_initxattrs, NULL);
2940feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2941feda821eSChristoph Hellwig 			goto out_iput;
2942feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2943feda821eSChristoph Hellwig 		if (error)
2944feda821eSChristoph Hellwig 			goto out_iput;
294560545d0dSAl Viro 		d_tmpfile(dentry, inode);
294660545d0dSAl Viro 	}
294760545d0dSAl Viro 	return error;
2948feda821eSChristoph Hellwig out_iput:
2949feda821eSChristoph Hellwig 	iput(inode);
2950feda821eSChristoph Hellwig 	return error;
295160545d0dSAl Viro }
295260545d0dSAl Viro 
295318bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
29541da177e4SLinus Torvalds {
29551da177e4SLinus Torvalds 	int error;
29561da177e4SLinus Torvalds 
29571da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
29581da177e4SLinus Torvalds 		return error;
2959d8c76e6fSDave Hansen 	inc_nlink(dir);
29601da177e4SLinus Torvalds 	return 0;
29611da177e4SLinus Torvalds }
29621da177e4SLinus Torvalds 
29634acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2964ebfc3b49SAl Viro 		bool excl)
29651da177e4SLinus Torvalds {
29661da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
29671da177e4SLinus Torvalds }
29681da177e4SLinus Torvalds 
29691da177e4SLinus Torvalds /*
29701da177e4SLinus Torvalds  * Link a file..
29711da177e4SLinus Torvalds  */
29721da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
29731da177e4SLinus Torvalds {
297475c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
297529b00e60SDarrick J. Wong 	int ret = 0;
29761da177e4SLinus Torvalds 
29771da177e4SLinus Torvalds 	/*
29781da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
29791da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29801da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29811062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29821062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29831da177e4SLinus Torvalds 	 */
29841062af92SDarrick J. Wong 	if (inode->i_nlink) {
2985*e809d5f0SChris Down 		ret = shmem_reserve_inode(inode->i_sb, NULL);
29865b04c689SPavel Emelyanov 		if (ret)
29875b04c689SPavel Emelyanov 			goto out;
29881062af92SDarrick J. Wong 	}
29891da177e4SLinus Torvalds 
29901da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2991078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2992d8c76e6fSDave Hansen 	inc_nlink(inode);
29937de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29941da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29951da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29965b04c689SPavel Emelyanov out:
29975b04c689SPavel Emelyanov 	return ret;
29981da177e4SLinus Torvalds }
29991da177e4SLinus Torvalds 
30001da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
30011da177e4SLinus Torvalds {
300275c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
30031da177e4SLinus Torvalds 
30045b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
30055b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
30061da177e4SLinus Torvalds 
30071da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
3008078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
30099a53c3a7SDave Hansen 	drop_nlink(inode);
30101da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
30111da177e4SLinus Torvalds 	return 0;
30121da177e4SLinus Torvalds }
30131da177e4SLinus Torvalds 
30141da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
30151da177e4SLinus Torvalds {
30161da177e4SLinus Torvalds 	if (!simple_empty(dentry))
30171da177e4SLinus Torvalds 		return -ENOTEMPTY;
30181da177e4SLinus Torvalds 
301975c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
30209a53c3a7SDave Hansen 	drop_nlink(dir);
30211da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
30221da177e4SLinus Torvalds }
30231da177e4SLinus Torvalds 
302437456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
302537456771SMiklos Szeredi {
3026e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
3027e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
302837456771SMiklos Szeredi 
302937456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
303037456771SMiklos Szeredi 		if (old_is_dir) {
303137456771SMiklos Szeredi 			drop_nlink(old_dir);
303237456771SMiklos Szeredi 			inc_nlink(new_dir);
303337456771SMiklos Szeredi 		} else {
303437456771SMiklos Szeredi 			drop_nlink(new_dir);
303537456771SMiklos Szeredi 			inc_nlink(old_dir);
303637456771SMiklos Szeredi 		}
303737456771SMiklos Szeredi 	}
303837456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
303937456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
304075c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
3041078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
304237456771SMiklos Szeredi 
304337456771SMiklos Szeredi 	return 0;
304437456771SMiklos Szeredi }
304537456771SMiklos Szeredi 
304646fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
304746fdb794SMiklos Szeredi {
304846fdb794SMiklos Szeredi 	struct dentry *whiteout;
304946fdb794SMiklos Szeredi 	int error;
305046fdb794SMiklos Szeredi 
305146fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
305246fdb794SMiklos Szeredi 	if (!whiteout)
305346fdb794SMiklos Szeredi 		return -ENOMEM;
305446fdb794SMiklos Szeredi 
305546fdb794SMiklos Szeredi 	error = shmem_mknod(old_dir, whiteout,
305646fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
305746fdb794SMiklos Szeredi 	dput(whiteout);
305846fdb794SMiklos Szeredi 	if (error)
305946fdb794SMiklos Szeredi 		return error;
306046fdb794SMiklos Szeredi 
306146fdb794SMiklos Szeredi 	/*
306246fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
306346fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
306446fdb794SMiklos Szeredi 	 *
306546fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
306646fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
306746fdb794SMiklos Szeredi 	 */
306846fdb794SMiklos Szeredi 	d_rehash(whiteout);
306946fdb794SMiklos Szeredi 	return 0;
307046fdb794SMiklos Szeredi }
307146fdb794SMiklos Szeredi 
30721da177e4SLinus Torvalds /*
30731da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
30741da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30751da177e4SLinus Torvalds  * it exists so that the VFS layer correctly free's it when it
30761da177e4SLinus Torvalds  * gets overwritten.
30771da177e4SLinus Torvalds  */
30783b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
30791da177e4SLinus Torvalds {
308075c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30811da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30821da177e4SLinus Torvalds 
308346fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30843b69ff51SMiklos Szeredi 		return -EINVAL;
30853b69ff51SMiklos Szeredi 
308637456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
308737456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
308837456771SMiklos Szeredi 
30891da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
30901da177e4SLinus Torvalds 		return -ENOTEMPTY;
30911da177e4SLinus Torvalds 
309246fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
309346fdb794SMiklos Szeredi 		int error;
309446fdb794SMiklos Szeredi 
309546fdb794SMiklos Szeredi 		error = shmem_whiteout(old_dir, old_dentry);
309646fdb794SMiklos Szeredi 		if (error)
309746fdb794SMiklos Szeredi 			return error;
309846fdb794SMiklos Szeredi 	}
309946fdb794SMiklos Szeredi 
310075c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
31011da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3102b928095bSMiklos Szeredi 		if (they_are_dirs) {
310375c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
31049a53c3a7SDave Hansen 			drop_nlink(old_dir);
3105b928095bSMiklos Szeredi 		}
31061da177e4SLinus Torvalds 	} else if (they_are_dirs) {
31079a53c3a7SDave Hansen 		drop_nlink(old_dir);
3108d8c76e6fSDave Hansen 		inc_nlink(new_dir);
31091da177e4SLinus Torvalds 	}
31101da177e4SLinus Torvalds 
31111da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
31121da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
31131da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
31141da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3115078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
31161da177e4SLinus Torvalds 	return 0;
31171da177e4SLinus Torvalds }
31181da177e4SLinus Torvalds 
31191da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
31201da177e4SLinus Torvalds {
31211da177e4SLinus Torvalds 	int error;
31221da177e4SLinus Torvalds 	int len;
31231da177e4SLinus Torvalds 	struct inode *inode;
31249276aad6SHugh Dickins 	struct page *page;
31251da177e4SLinus Torvalds 
31261da177e4SLinus Torvalds 	len = strlen(symname) + 1;
312709cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
31281da177e4SLinus Torvalds 		return -ENAMETOOLONG;
31291da177e4SLinus Torvalds 
31300825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
31310825a6f9SJoe Perches 				VM_NORESERVE);
31321da177e4SLinus Torvalds 	if (!inode)
31331da177e4SLinus Torvalds 		return -ENOSPC;
31341da177e4SLinus Torvalds 
31359d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
31366d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3137343c3d7fSMateusz Nosek 	if (error && error != -EOPNOTSUPP) {
3138570bc1c2SStephen Smalley 		iput(inode);
3139570bc1c2SStephen Smalley 		return error;
3140570bc1c2SStephen Smalley 	}
3141570bc1c2SStephen Smalley 
31421da177e4SLinus Torvalds 	inode->i_size = len-1;
314369f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
31443ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
31453ed47db3SAl Viro 		if (!inode->i_link) {
314669f07ec9SHugh Dickins 			iput(inode);
314769f07ec9SHugh Dickins 			return -ENOMEM;
314869f07ec9SHugh Dickins 		}
314969f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
31501da177e4SLinus Torvalds 	} else {
3151e8ecde25SAl Viro 		inode_nohighmem(inode);
31529e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
31531da177e4SLinus Torvalds 		if (error) {
31541da177e4SLinus Torvalds 			iput(inode);
31551da177e4SLinus Torvalds 			return error;
31561da177e4SLinus Torvalds 		}
315714fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
31581da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
315921fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3160ec9516fbSHugh Dickins 		SetPageUptodate(page);
31611da177e4SLinus Torvalds 		set_page_dirty(page);
31626746aff7SWu Fengguang 		unlock_page(page);
316309cbfeafSKirill A. Shutemov 		put_page(page);
31641da177e4SLinus Torvalds 	}
31651da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3166078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
31671da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31681da177e4SLinus Torvalds 	dget(dentry);
31691da177e4SLinus Torvalds 	return 0;
31701da177e4SLinus Torvalds }
31711da177e4SLinus Torvalds 
3172fceef393SAl Viro static void shmem_put_link(void *arg)
3173fceef393SAl Viro {
3174fceef393SAl Viro 	mark_page_accessed(arg);
3175fceef393SAl Viro 	put_page(arg);
3176fceef393SAl Viro }
3177fceef393SAl Viro 
31786b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3179fceef393SAl Viro 				  struct inode *inode,
3180fceef393SAl Viro 				  struct delayed_call *done)
31811da177e4SLinus Torvalds {
31821da177e4SLinus Torvalds 	struct page *page = NULL;
31836b255391SAl Viro 	int error;
31846a6c9904SAl Viro 	if (!dentry) {
31856a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
31866a6c9904SAl Viro 		if (!page)
31876b255391SAl Viro 			return ERR_PTR(-ECHILD);
31886a6c9904SAl Viro 		if (!PageUptodate(page)) {
31896a6c9904SAl Viro 			put_page(page);
31906a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31916a6c9904SAl Viro 		}
31926a6c9904SAl Viro 	} else {
31939e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3194680baacbSAl Viro 		if (error)
3195680baacbSAl Viro 			return ERR_PTR(error);
3196d3602444SHugh Dickins 		unlock_page(page);
31971da177e4SLinus Torvalds 	}
3198fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
319921fc61c7SAl Viro 	return page_address(page);
32001da177e4SLinus Torvalds }
32011da177e4SLinus Torvalds 
3202b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3203b09e0fa4SEric Paris /*
3204b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3205b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3206b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3207b09e0fa4SEric Paris  * filesystem level, though.
3208b09e0fa4SEric Paris  */
3209b09e0fa4SEric Paris 
32106d9d88d0SJarkko Sakkinen /*
32116d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
32126d9d88d0SJarkko Sakkinen  */
32136d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
32146d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
32156d9d88d0SJarkko Sakkinen 			    void *fs_info)
32166d9d88d0SJarkko Sakkinen {
32176d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
32186d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
321938f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
32206d9d88d0SJarkko Sakkinen 	size_t len;
32216d9d88d0SJarkko Sakkinen 
32226d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
322338f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
32246d9d88d0SJarkko Sakkinen 		if (!new_xattr)
32256d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32266d9d88d0SJarkko Sakkinen 
32276d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
32286d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
32296d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
32306d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
32313bef735aSChengguang Xu 			kvfree(new_xattr);
32326d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32336d9d88d0SJarkko Sakkinen 		}
32346d9d88d0SJarkko Sakkinen 
32356d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
32366d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
32376d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
32386d9d88d0SJarkko Sakkinen 		       xattr->name, len);
32396d9d88d0SJarkko Sakkinen 
324038f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
32416d9d88d0SJarkko Sakkinen 	}
32426d9d88d0SJarkko Sakkinen 
32436d9d88d0SJarkko Sakkinen 	return 0;
32446d9d88d0SJarkko Sakkinen }
32456d9d88d0SJarkko Sakkinen 
3246aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3247b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3248b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3249aa7c5241SAndreas Gruenbacher {
3250b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3251aa7c5241SAndreas Gruenbacher 
3252aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3253aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3254aa7c5241SAndreas Gruenbacher }
3255aa7c5241SAndreas Gruenbacher 
3256aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
325759301226SAl Viro 				   struct dentry *unused, struct inode *inode,
325859301226SAl Viro 				   const char *name, const void *value,
325959301226SAl Viro 				   size_t size, int flags)
3260aa7c5241SAndreas Gruenbacher {
326159301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3262aa7c5241SAndreas Gruenbacher 
3263aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3264a46a2295SDaniel Xu 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3265aa7c5241SAndreas Gruenbacher }
3266aa7c5241SAndreas Gruenbacher 
3267aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3268aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3269aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3270aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3271aa7c5241SAndreas Gruenbacher };
3272aa7c5241SAndreas Gruenbacher 
3273aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3274aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3275aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3276aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3277aa7c5241SAndreas Gruenbacher };
3278aa7c5241SAndreas Gruenbacher 
3279b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3280b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3281feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3282feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3283b09e0fa4SEric Paris #endif
3284aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3285aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3286b09e0fa4SEric Paris 	NULL
3287b09e0fa4SEric Paris };
3288b09e0fa4SEric Paris 
3289b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3290b09e0fa4SEric Paris {
329175c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3292786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3293b09e0fa4SEric Paris }
3294b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3295b09e0fa4SEric Paris 
329669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
32976b255391SAl Viro 	.get_link	= simple_get_link,
3298b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3299b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3300b09e0fa4SEric Paris #endif
33011da177e4SLinus Torvalds };
33021da177e4SLinus Torvalds 
330392e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
33046b255391SAl Viro 	.get_link	= shmem_get_link,
3305b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3306b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
330739f0247dSAndreas Gruenbacher #endif
3308b09e0fa4SEric Paris };
330939f0247dSAndreas Gruenbacher 
331091828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
331191828a40SDavid M. Grimes {
331291828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
331391828a40SDavid M. Grimes }
331491828a40SDavid M. Grimes 
331591828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
331691828a40SDavid M. Grimes {
331791828a40SDavid M. Grimes 	__u32 *fh = vfh;
331891828a40SDavid M. Grimes 	__u64 inum = fh[2];
331991828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
332091828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
332191828a40SDavid M. Grimes }
332291828a40SDavid M. Grimes 
332312ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
332412ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
332512ba780dSAmir Goldstein {
332612ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
332712ba780dSAmir Goldstein 
332812ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
332912ba780dSAmir Goldstein }
333012ba780dSAmir Goldstein 
333112ba780dSAmir Goldstein 
3332480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3333480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
333491828a40SDavid M. Grimes {
333591828a40SDavid M. Grimes 	struct inode *inode;
3336480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
333735c2a7f4SHugh Dickins 	u64 inum;
333891828a40SDavid M. Grimes 
3339480b116cSChristoph Hellwig 	if (fh_len < 3)
3340480b116cSChristoph Hellwig 		return NULL;
3341480b116cSChristoph Hellwig 
334235c2a7f4SHugh Dickins 	inum = fid->raw[2];
334335c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
334435c2a7f4SHugh Dickins 
3345480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3346480b116cSChristoph Hellwig 			shmem_match, fid->raw);
334791828a40SDavid M. Grimes 	if (inode) {
334812ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
334991828a40SDavid M. Grimes 		iput(inode);
335091828a40SDavid M. Grimes 	}
335191828a40SDavid M. Grimes 
3352480b116cSChristoph Hellwig 	return dentry;
335391828a40SDavid M. Grimes }
335491828a40SDavid M. Grimes 
3355b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3356b0b0382bSAl Viro 				struct inode *parent)
335791828a40SDavid M. Grimes {
33585fe0c237SAneesh Kumar K.V 	if (*len < 3) {
33595fe0c237SAneesh Kumar K.V 		*len = 3;
336094e07a75SNamjae Jeon 		return FILEID_INVALID;
33615fe0c237SAneesh Kumar K.V 	}
336291828a40SDavid M. Grimes 
33631d3382cbSAl Viro 	if (inode_unhashed(inode)) {
336491828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
336591828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
336691828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
336791828a40SDavid M. Grimes 		 * to do it once
336891828a40SDavid M. Grimes 		 */
336991828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
337091828a40SDavid M. Grimes 		spin_lock(&lock);
33711d3382cbSAl Viro 		if (inode_unhashed(inode))
337291828a40SDavid M. Grimes 			__insert_inode_hash(inode,
337391828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
337491828a40SDavid M. Grimes 		spin_unlock(&lock);
337591828a40SDavid M. Grimes 	}
337691828a40SDavid M. Grimes 
337791828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
337891828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
337991828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
338091828a40SDavid M. Grimes 
338191828a40SDavid M. Grimes 	*len = 3;
338291828a40SDavid M. Grimes 	return 1;
338391828a40SDavid M. Grimes }
338491828a40SDavid M. Grimes 
338539655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
338691828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
338791828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3388480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
338991828a40SDavid M. Grimes };
339091828a40SDavid M. Grimes 
3391626c3920SAl Viro enum shmem_param {
3392626c3920SAl Viro 	Opt_gid,
3393626c3920SAl Viro 	Opt_huge,
3394626c3920SAl Viro 	Opt_mode,
3395626c3920SAl Viro 	Opt_mpol,
3396626c3920SAl Viro 	Opt_nr_blocks,
3397626c3920SAl Viro 	Opt_nr_inodes,
3398626c3920SAl Viro 	Opt_size,
3399626c3920SAl Viro 	Opt_uid,
3400626c3920SAl Viro };
34011da177e4SLinus Torvalds 
34025eede625SAl Viro static const struct constant_table shmem_param_enums_huge[] = {
34032710c957SAl Viro 	{"never",	SHMEM_HUGE_NEVER },
34042710c957SAl Viro 	{"always",	SHMEM_HUGE_ALWAYS },
34052710c957SAl Viro 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
34062710c957SAl Viro 	{"advise",	SHMEM_HUGE_ADVISE },
34072710c957SAl Viro 	{}
34082710c957SAl Viro };
34092710c957SAl Viro 
3410d7167b14SAl Viro const struct fs_parameter_spec shmem_fs_parameters[] = {
3411626c3920SAl Viro 	fsparam_u32   ("gid",		Opt_gid),
34122710c957SAl Viro 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3413626c3920SAl Viro 	fsparam_u32oct("mode",		Opt_mode),
3414626c3920SAl Viro 	fsparam_string("mpol",		Opt_mpol),
3415626c3920SAl Viro 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3416626c3920SAl Viro 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3417626c3920SAl Viro 	fsparam_string("size",		Opt_size),
3418626c3920SAl Viro 	fsparam_u32   ("uid",		Opt_uid),
3419626c3920SAl Viro 	{}
3420626c3920SAl Viro };
3421626c3920SAl Viro 
3422f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3423626c3920SAl Viro {
3424f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3425626c3920SAl Viro 	struct fs_parse_result result;
3426e04dc423SAl Viro 	unsigned long long size;
3427626c3920SAl Viro 	char *rest;
3428626c3920SAl Viro 	int opt;
3429626c3920SAl Viro 
3430d7167b14SAl Viro 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3431f3235626SDavid Howells 	if (opt < 0)
3432626c3920SAl Viro 		return opt;
3433626c3920SAl Viro 
3434626c3920SAl Viro 	switch (opt) {
3435626c3920SAl Viro 	case Opt_size:
3436626c3920SAl Viro 		size = memparse(param->string, &rest);
3437e04dc423SAl Viro 		if (*rest == '%') {
3438e04dc423SAl Viro 			size <<= PAGE_SHIFT;
3439e04dc423SAl Viro 			size *= totalram_pages();
3440e04dc423SAl Viro 			do_div(size, 100);
3441e04dc423SAl Viro 			rest++;
3442e04dc423SAl Viro 		}
3443e04dc423SAl Viro 		if (*rest)
3444626c3920SAl Viro 			goto bad_value;
3445e04dc423SAl Viro 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3446e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3447626c3920SAl Viro 		break;
3448626c3920SAl Viro 	case Opt_nr_blocks:
3449626c3920SAl Viro 		ctx->blocks = memparse(param->string, &rest);
3450e04dc423SAl Viro 		if (*rest)
3451626c3920SAl Viro 			goto bad_value;
3452e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3453626c3920SAl Viro 		break;
3454626c3920SAl Viro 	case Opt_nr_inodes:
3455626c3920SAl Viro 		ctx->inodes = memparse(param->string, &rest);
3456e04dc423SAl Viro 		if (*rest)
3457626c3920SAl Viro 			goto bad_value;
3458e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_INODES;
3459626c3920SAl Viro 		break;
3460626c3920SAl Viro 	case Opt_mode:
3461626c3920SAl Viro 		ctx->mode = result.uint_32 & 07777;
3462626c3920SAl Viro 		break;
3463626c3920SAl Viro 	case Opt_uid:
3464626c3920SAl Viro 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3465e04dc423SAl Viro 		if (!uid_valid(ctx->uid))
3466626c3920SAl Viro 			goto bad_value;
3467626c3920SAl Viro 		break;
3468626c3920SAl Viro 	case Opt_gid:
3469626c3920SAl Viro 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3470e04dc423SAl Viro 		if (!gid_valid(ctx->gid))
3471626c3920SAl Viro 			goto bad_value;
3472626c3920SAl Viro 		break;
3473626c3920SAl Viro 	case Opt_huge:
3474626c3920SAl Viro 		ctx->huge = result.uint_32;
3475626c3920SAl Viro 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3476396bcc52SMatthew Wilcox (Oracle) 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3477626c3920SAl Viro 		      has_transparent_hugepage()))
3478626c3920SAl Viro 			goto unsupported_parameter;
3479e04dc423SAl Viro 		ctx->seen |= SHMEM_SEEN_HUGE;
3480626c3920SAl Viro 		break;
3481626c3920SAl Viro 	case Opt_mpol:
3482626c3920SAl Viro 		if (IS_ENABLED(CONFIG_NUMA)) {
3483e04dc423SAl Viro 			mpol_put(ctx->mpol);
3484e04dc423SAl Viro 			ctx->mpol = NULL;
3485626c3920SAl Viro 			if (mpol_parse_str(param->string, &ctx->mpol))
3486626c3920SAl Viro 				goto bad_value;
3487626c3920SAl Viro 			break;
3488626c3920SAl Viro 		}
3489626c3920SAl Viro 		goto unsupported_parameter;
3490e04dc423SAl Viro 	}
3491e04dc423SAl Viro 	return 0;
3492e04dc423SAl Viro 
3493626c3920SAl Viro unsupported_parameter:
3494f35aa2bcSAl Viro 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3495626c3920SAl Viro bad_value:
3496f35aa2bcSAl Viro 	return invalfc(fc, "Bad value for '%s'", param->key);
3497e04dc423SAl Viro }
3498e04dc423SAl Viro 
3499f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data)
3500e04dc423SAl Viro {
3501f3235626SDavid Howells 	char *options = data;
3502f3235626SDavid Howells 
350333f37c64SAl Viro 	if (options) {
350433f37c64SAl Viro 		int err = security_sb_eat_lsm_opts(options, &fc->security);
350533f37c64SAl Viro 		if (err)
350633f37c64SAl Viro 			return err;
350733f37c64SAl Viro 	}
350833f37c64SAl Viro 
3509b00dc3adSHugh Dickins 	while (options != NULL) {
3510626c3920SAl Viro 		char *this_char = options;
3511b00dc3adSHugh Dickins 		for (;;) {
3512b00dc3adSHugh Dickins 			/*
3513b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3514b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3515b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3516b00dc3adSHugh Dickins 			 */
3517b00dc3adSHugh Dickins 			options = strchr(options, ',');
3518b00dc3adSHugh Dickins 			if (options == NULL)
3519b00dc3adSHugh Dickins 				break;
3520b00dc3adSHugh Dickins 			options++;
3521b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3522b00dc3adSHugh Dickins 				options[-1] = '\0';
3523b00dc3adSHugh Dickins 				break;
3524b00dc3adSHugh Dickins 			}
3525b00dc3adSHugh Dickins 		}
3526626c3920SAl Viro 		if (*this_char) {
3527626c3920SAl Viro 			char *value = strchr(this_char,'=');
3528f3235626SDavid Howells 			size_t len = 0;
3529626c3920SAl Viro 			int err;
3530626c3920SAl Viro 
3531626c3920SAl Viro 			if (value) {
3532626c3920SAl Viro 				*value++ = '\0';
3533f3235626SDavid Howells 				len = strlen(value);
35341da177e4SLinus Torvalds 			}
3535f3235626SDavid Howells 			err = vfs_parse_fs_string(fc, this_char, value, len);
3536f3235626SDavid Howells 			if (err < 0)
3537f3235626SDavid Howells 				return err;
35381da177e4SLinus Torvalds 		}
3539626c3920SAl Viro 	}
35401da177e4SLinus Torvalds 	return 0;
35411da177e4SLinus Torvalds }
35421da177e4SLinus Torvalds 
3543f3235626SDavid Howells /*
3544f3235626SDavid Howells  * Reconfigure a shmem filesystem.
3545f3235626SDavid Howells  *
3546f3235626SDavid Howells  * Note that we disallow change from limited->unlimited blocks/inodes while any
3547f3235626SDavid Howells  * are in use; but we must separately disallow unlimited->limited, because in
3548f3235626SDavid Howells  * that case we have no record of how much is already in use.
3549f3235626SDavid Howells  */
3550f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc)
35511da177e4SLinus Torvalds {
3552f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3553f3235626SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
35540edd73b3SHugh Dickins 	unsigned long inodes;
3555f3235626SDavid Howells 	const char *err;
35560edd73b3SHugh Dickins 
35570edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
35580edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3559f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3560f3235626SDavid Howells 		if (!sbinfo->max_blocks) {
3561f3235626SDavid Howells 			err = "Cannot retroactively limit size";
35620edd73b3SHugh Dickins 			goto out;
35630b5071ddSAl Viro 		}
3564f3235626SDavid Howells 		if (percpu_counter_compare(&sbinfo->used_blocks,
3565f3235626SDavid Howells 					   ctx->blocks) > 0) {
3566f3235626SDavid Howells 			err = "Too small a size for current use";
35670b5071ddSAl Viro 			goto out;
3568f3235626SDavid Howells 		}
3569f3235626SDavid Howells 	}
3570f3235626SDavid Howells 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3571f3235626SDavid Howells 		if (!sbinfo->max_inodes) {
3572f3235626SDavid Howells 			err = "Cannot retroactively limit inodes";
35730b5071ddSAl Viro 			goto out;
35740b5071ddSAl Viro 		}
3575f3235626SDavid Howells 		if (ctx->inodes < inodes) {
3576f3235626SDavid Howells 			err = "Too few inodes for current use";
3577f3235626SDavid Howells 			goto out;
3578f3235626SDavid Howells 		}
3579f3235626SDavid Howells 	}
35800edd73b3SHugh Dickins 
3581f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_HUGE)
3582f3235626SDavid Howells 		sbinfo->huge = ctx->huge;
3583f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3584f3235626SDavid Howells 		sbinfo->max_blocks  = ctx->blocks;
3585f3235626SDavid Howells 	if (ctx->seen & SHMEM_SEEN_INODES) {
3586f3235626SDavid Howells 		sbinfo->max_inodes  = ctx->inodes;
3587f3235626SDavid Howells 		sbinfo->free_inodes = ctx->inodes - inodes;
35880b5071ddSAl Viro 	}
358971fe804bSLee Schermerhorn 
35905f00110fSGreg Thelen 	/*
35915f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
35925f00110fSGreg Thelen 	 */
3593f3235626SDavid Howells 	if (ctx->mpol) {
359471fe804bSLee Schermerhorn 		mpol_put(sbinfo->mpol);
3595f3235626SDavid Howells 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3596f3235626SDavid Howells 		ctx->mpol = NULL;
35975f00110fSGreg Thelen 	}
3598f3235626SDavid Howells 	spin_unlock(&sbinfo->stat_lock);
3599f3235626SDavid Howells 	return 0;
36000edd73b3SHugh Dickins out:
36010edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
3602f35aa2bcSAl Viro 	return invalfc(fc, "%s", err);
36031da177e4SLinus Torvalds }
3604680d794bSakpm@linux-foundation.org 
360534c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3606680d794bSakpm@linux-foundation.org {
360734c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3608680d794bSakpm@linux-foundation.org 
3609680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3610680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
361109cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3612680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3613680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
36140825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
361509208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
36168751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
36178751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
36188751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
36198751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
36208751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
36218751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3622396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
36235a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
36245a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
36255a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
36265a6e75f8SKirill A. Shutemov #endif
362771fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3628680d794bSakpm@linux-foundation.org 	return 0;
3629680d794bSakpm@linux-foundation.org }
36309183df25SDavid Herrmann 
3631680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
36321da177e4SLinus Torvalds 
36331da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
36341da177e4SLinus Torvalds {
3635602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3636602586a8SHugh Dickins 
3637*e809d5f0SChris Down 	free_percpu(sbinfo->ino_batch);
3638602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
363949cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3640602586a8SHugh Dickins 	kfree(sbinfo);
36411da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
36421da177e4SLinus Torvalds }
36431da177e4SLinus Torvalds 
3644f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
36451da177e4SLinus Torvalds {
3646f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
36471da177e4SLinus Torvalds 	struct inode *inode;
36480edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3649680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3650680d794bSakpm@linux-foundation.org 
3651680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3652425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3653680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3654680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3655680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3656680d794bSakpm@linux-foundation.org 
3657680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
36581da177e4SLinus Torvalds 
36590edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
36601da177e4SLinus Torvalds 	/*
36611da177e4SLinus Torvalds 	 * Per default we only allow half of the physical ram per
36621da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
36631da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
36641da177e4SLinus Torvalds 	 */
36651751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3666f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3667f3235626SDavid Howells 			ctx->blocks = shmem_default_max_blocks();
3668f3235626SDavid Howells 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3669f3235626SDavid Howells 			ctx->inodes = shmem_default_max_inodes();
3670ca4e0519SAl Viro 	} else {
36711751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
36721da177e4SLinus Torvalds 	}
367391828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
36741751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
36750edd73b3SHugh Dickins #else
36761751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
36770edd73b3SHugh Dickins #endif
3678f3235626SDavid Howells 	sbinfo->max_blocks = ctx->blocks;
3679f3235626SDavid Howells 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3680*e809d5f0SChris Down 	if (sb->s_flags & SB_KERNMOUNT) {
3681*e809d5f0SChris Down 		sbinfo->ino_batch = alloc_percpu(ino_t);
3682*e809d5f0SChris Down 		if (!sbinfo->ino_batch)
3683*e809d5f0SChris Down 			goto failed;
3684*e809d5f0SChris Down 	}
3685f3235626SDavid Howells 	sbinfo->uid = ctx->uid;
3686f3235626SDavid Howells 	sbinfo->gid = ctx->gid;
3687f3235626SDavid Howells 	sbinfo->mode = ctx->mode;
3688f3235626SDavid Howells 	sbinfo->huge = ctx->huge;
3689f3235626SDavid Howells 	sbinfo->mpol = ctx->mpol;
3690f3235626SDavid Howells 	ctx->mpol = NULL;
36911da177e4SLinus Torvalds 
36921da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
3693908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3694602586a8SHugh Dickins 		goto failed;
3695779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3696779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
36971da177e4SLinus Torvalds 
3698285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
369909cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
370009cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
37011da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
37021da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3703cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3704b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
370539f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3706b09e0fa4SEric Paris #endif
3707b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
37081751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
370939f0247dSAndreas Gruenbacher #endif
37102b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
37110edd73b3SHugh Dickins 
3712454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
37131da177e4SLinus Torvalds 	if (!inode)
37141da177e4SLinus Torvalds 		goto failed;
3715680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3716680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3717318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3718318ceed0SAl Viro 	if (!sb->s_root)
371948fde701SAl Viro 		goto failed;
37201da177e4SLinus Torvalds 	return 0;
37211da177e4SLinus Torvalds 
37221da177e4SLinus Torvalds failed:
37231da177e4SLinus Torvalds 	shmem_put_super(sb);
37241da177e4SLinus Torvalds 	return err;
37251da177e4SLinus Torvalds }
37261da177e4SLinus Torvalds 
3727f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc)
3728f3235626SDavid Howells {
3729f3235626SDavid Howells 	return get_tree_nodev(fc, shmem_fill_super);
3730f3235626SDavid Howells }
3731f3235626SDavid Howells 
3732f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc)
3733f3235626SDavid Howells {
3734f3235626SDavid Howells 	struct shmem_options *ctx = fc->fs_private;
3735f3235626SDavid Howells 
3736f3235626SDavid Howells 	if (ctx) {
3737f3235626SDavid Howells 		mpol_put(ctx->mpol);
3738f3235626SDavid Howells 		kfree(ctx);
3739f3235626SDavid Howells 	}
3740f3235626SDavid Howells }
3741f3235626SDavid Howells 
3742f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = {
3743f3235626SDavid Howells 	.free			= shmem_free_fc,
3744f3235626SDavid Howells 	.get_tree		= shmem_get_tree,
3745f3235626SDavid Howells #ifdef CONFIG_TMPFS
3746f3235626SDavid Howells 	.parse_monolithic	= shmem_parse_options,
3747f3235626SDavid Howells 	.parse_param		= shmem_parse_one,
3748f3235626SDavid Howells 	.reconfigure		= shmem_reconfigure,
3749f3235626SDavid Howells #endif
3750f3235626SDavid Howells };
3751f3235626SDavid Howells 
3752fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
37531da177e4SLinus Torvalds 
37541da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
37551da177e4SLinus Torvalds {
375641ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
375741ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
375841ffe5d5SHugh Dickins 	if (!info)
37591da177e4SLinus Torvalds 		return NULL;
376041ffe5d5SHugh Dickins 	return &info->vfs_inode;
37611da177e4SLinus Torvalds }
37621da177e4SLinus Torvalds 
376374b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode)
3764fa0d7e3dSNick Piggin {
376584e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
37663ed47db3SAl Viro 		kfree(inode->i_link);
3767fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3768fa0d7e3dSNick Piggin }
3769fa0d7e3dSNick Piggin 
37701da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
37711da177e4SLinus Torvalds {
377209208d15SAl Viro 	if (S_ISREG(inode->i_mode))
37731da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
37741da177e4SLinus Torvalds }
37751da177e4SLinus Torvalds 
377641ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
37771da177e4SLinus Torvalds {
377841ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
377941ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
37801da177e4SLinus Torvalds }
37811da177e4SLinus Torvalds 
37829a8ec03eSweiping zhang static void shmem_init_inodecache(void)
37831da177e4SLinus Torvalds {
37841da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
37851da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
37865d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
37871da177e4SLinus Torvalds }
37881da177e4SLinus Torvalds 
378941ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
37901da177e4SLinus Torvalds {
37911a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
37921da177e4SLinus Torvalds }
37931da177e4SLinus Torvalds 
3794f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
37951da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
379676719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
37971da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3798800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3799800d15a5SNick Piggin 	.write_end	= shmem_write_end,
38001da177e4SLinus Torvalds #endif
38011c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3802304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
38031c93923cSAndrew Morton #endif
3804aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
38051da177e4SLinus Torvalds };
38061da177e4SLinus Torvalds 
380715ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
38081da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3809c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
38101da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3811220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
38122ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
38138174202bSAl Viro 	.write_iter	= generic_file_write_iter,
38141b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
381582c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3816f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
381783e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
38181da177e4SLinus Torvalds #endif
38191da177e4SLinus Torvalds };
38201da177e4SLinus Torvalds 
382192e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
382244a30220SYu Zhao 	.getattr	= shmem_getattr,
382394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3824b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3825b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3826feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3827b09e0fa4SEric Paris #endif
38281da177e4SLinus Torvalds };
38291da177e4SLinus Torvalds 
383092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38311da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38321da177e4SLinus Torvalds 	.create		= shmem_create,
38331da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38341da177e4SLinus Torvalds 	.link		= shmem_link,
38351da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
38361da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
38371da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
38381da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
38391da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
38402773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
384160545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
38421da177e4SLinus Torvalds #endif
3843b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3844b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3845b09e0fa4SEric Paris #endif
384639f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
384794c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3848feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
384939f0247dSAndreas Gruenbacher #endif
385039f0247dSAndreas Gruenbacher };
385139f0247dSAndreas Gruenbacher 
385292e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3853b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3854b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3855b09e0fa4SEric Paris #endif
385639f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
385794c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3858feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
385939f0247dSAndreas Gruenbacher #endif
38601da177e4SLinus Torvalds };
38611da177e4SLinus Torvalds 
3862759b9775SHugh Dickins static const struct super_operations shmem_ops = {
38631da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
386474b1da56SAl Viro 	.free_inode	= shmem_free_in_core_inode,
38651da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
38661da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38671da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
3868680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
38691da177e4SLinus Torvalds #endif
38701f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
38711da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
38721da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3873396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3874779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3875779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3876779750d2SKirill A. Shutemov #endif
38771da177e4SLinus Torvalds };
38781da177e4SLinus Torvalds 
3879f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
388054cb8821SNick Piggin 	.fault		= shmem_fault,
3881d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
38821da177e4SLinus Torvalds #ifdef CONFIG_NUMA
38831da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
38841da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
38851da177e4SLinus Torvalds #endif
38861da177e4SLinus Torvalds };
38871da177e4SLinus Torvalds 
3888f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc)
38891da177e4SLinus Torvalds {
3890f3235626SDavid Howells 	struct shmem_options *ctx;
3891f3235626SDavid Howells 
3892f3235626SDavid Howells 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3893f3235626SDavid Howells 	if (!ctx)
3894f3235626SDavid Howells 		return -ENOMEM;
3895f3235626SDavid Howells 
3896f3235626SDavid Howells 	ctx->mode = 0777 | S_ISVTX;
3897f3235626SDavid Howells 	ctx->uid = current_fsuid();
3898f3235626SDavid Howells 	ctx->gid = current_fsgid();
3899f3235626SDavid Howells 
3900f3235626SDavid Howells 	fc->fs_private = ctx;
3901f3235626SDavid Howells 	fc->ops = &shmem_fs_context_ops;
3902f3235626SDavid Howells 	return 0;
39031da177e4SLinus Torvalds }
39041da177e4SLinus Torvalds 
390541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
39061da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
39071da177e4SLinus Torvalds 	.name		= "tmpfs",
3908f3235626SDavid Howells 	.init_fs_context = shmem_init_fs_context,
3909f3235626SDavid Howells #ifdef CONFIG_TMPFS
3910d7167b14SAl Viro 	.parameters	= shmem_fs_parameters,
3911f3235626SDavid Howells #endif
39121da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
39132b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
39141da177e4SLinus Torvalds };
39151da177e4SLinus Torvalds 
391641ffe5d5SHugh Dickins int __init shmem_init(void)
39171da177e4SLinus Torvalds {
39181da177e4SLinus Torvalds 	int error;
39191da177e4SLinus Torvalds 
39209a8ec03eSweiping zhang 	shmem_init_inodecache();
39211da177e4SLinus Torvalds 
392241ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
39231da177e4SLinus Torvalds 	if (error) {
39241170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
39251da177e4SLinus Torvalds 		goto out2;
39261da177e4SLinus Torvalds 	}
392795dc112aSGreg Kroah-Hartman 
3928ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39291da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39301da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39311170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39321da177e4SLinus Torvalds 		goto out1;
39331da177e4SLinus Torvalds 	}
39345a6e75f8SKirill A. Shutemov 
3935396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3936435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
39375a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39385a6e75f8SKirill A. Shutemov 	else
39395a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
39405a6e75f8SKirill A. Shutemov #endif
39411da177e4SLinus Torvalds 	return 0;
39421da177e4SLinus Torvalds 
39431da177e4SLinus Torvalds out1:
394441ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
39451da177e4SLinus Torvalds out2:
394641ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
39471da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
39481da177e4SLinus Torvalds 	return error;
39491da177e4SLinus Torvalds }
3950853ac43aSMatt Mackall 
3951396bcc52SMatthew Wilcox (Oracle) #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
39525a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
39535a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
39545a6e75f8SKirill A. Shutemov {
395526083eb6SColin Ian King 	static const int values[] = {
39565a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
39575a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
39585a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
39595a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
39605a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
39615a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
39625a6e75f8SKirill A. Shutemov 	};
39635a6e75f8SKirill A. Shutemov 	int i, count;
39645a6e75f8SKirill A. Shutemov 
39655a6e75f8SKirill A. Shutemov 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
39665a6e75f8SKirill A. Shutemov 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
39675a6e75f8SKirill A. Shutemov 
39685a6e75f8SKirill A. Shutemov 		count += sprintf(buf + count, fmt,
39695a6e75f8SKirill A. Shutemov 				shmem_format_huge(values[i]));
39705a6e75f8SKirill A. Shutemov 	}
39715a6e75f8SKirill A. Shutemov 	buf[count - 1] = '\n';
39725a6e75f8SKirill A. Shutemov 	return count;
39735a6e75f8SKirill A. Shutemov }
39745a6e75f8SKirill A. Shutemov 
39755a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
39765a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
39775a6e75f8SKirill A. Shutemov {
39785a6e75f8SKirill A. Shutemov 	char tmp[16];
39795a6e75f8SKirill A. Shutemov 	int huge;
39805a6e75f8SKirill A. Shutemov 
39815a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
39825a6e75f8SKirill A. Shutemov 		return -EINVAL;
39835a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
39845a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
39855a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
39865a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
39875a6e75f8SKirill A. Shutemov 
39885a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
39895a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
39905a6e75f8SKirill A. Shutemov 		return -EINVAL;
39915a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
39925a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
39935a6e75f8SKirill A. Shutemov 		return -EINVAL;
39945a6e75f8SKirill A. Shutemov 
39955a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
3996435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
39975a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39985a6e75f8SKirill A. Shutemov 	return count;
39995a6e75f8SKirill A. Shutemov }
40005a6e75f8SKirill A. Shutemov 
40015a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
40025a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4003396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
4004f3f0e1d2SKirill A. Shutemov 
4005396bcc52SMatthew Wilcox (Oracle) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4006f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
4007f3f0e1d2SKirill A. Shutemov {
4008f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
4009f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4010f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
4011f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
4012f3f0e1d2SKirill A. Shutemov 
4013c0630669SYang Shi 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
4014c0630669SYang Shi 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
4015c0630669SYang Shi 		return false;
4016f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
4017f3f0e1d2SKirill A. Shutemov 		return true;
4018f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
4019f3f0e1d2SKirill A. Shutemov 		return false;
4020f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
4021f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
4022f3f0e1d2SKirill A. Shutemov 			return false;
4023f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ALWAYS:
4024f3f0e1d2SKirill A. Shutemov 			return true;
4025f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
4026f3f0e1d2SKirill A. Shutemov 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4027f3f0e1d2SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4028f3f0e1d2SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
4029f3f0e1d2SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
4030f3f0e1d2SKirill A. Shutemov 				return true;
4031e4a9bc58SJoe Perches 			fallthrough;
4032f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
4033f3f0e1d2SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
4034f3f0e1d2SKirill A. Shutemov 			return (vma->vm_flags & VM_HUGEPAGE);
4035f3f0e1d2SKirill A. Shutemov 		default:
4036f3f0e1d2SKirill A. Shutemov 			VM_BUG_ON(1);
4037f3f0e1d2SKirill A. Shutemov 			return false;
4038f3f0e1d2SKirill A. Shutemov 	}
4039f3f0e1d2SKirill A. Shutemov }
4040396bcc52SMatthew Wilcox (Oracle) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
40415a6e75f8SKirill A. Shutemov 
4042853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4043853ac43aSMatt Mackall 
4044853ac43aSMatt Mackall /*
4045853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4046853ac43aSMatt Mackall  *
4047853ac43aSMatt Mackall  * This is intended for small system where the benefits of the full
4048853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4049853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4050853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4051853ac43aSMatt Mackall  */
4052853ac43aSMatt Mackall 
405341ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4054853ac43aSMatt Mackall 	.name		= "tmpfs",
4055f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4056d7167b14SAl Viro 	.parameters	= ramfs_fs_parameters,
4057853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40582b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4059853ac43aSMatt Mackall };
4060853ac43aSMatt Mackall 
406141ffe5d5SHugh Dickins int __init shmem_init(void)
4062853ac43aSMatt Mackall {
406341ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4064853ac43aSMatt Mackall 
406541ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4066853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4067853ac43aSMatt Mackall 
4068853ac43aSMatt Mackall 	return 0;
4069853ac43aSMatt Mackall }
4070853ac43aSMatt Mackall 
4071b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4072b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
4073853ac43aSMatt Mackall {
4074853ac43aSMatt Mackall 	return 0;
4075853ac43aSMatt Mackall }
4076853ac43aSMatt Mackall 
40773f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
40783f96b79aSHugh Dickins {
40793f96b79aSHugh Dickins 	return 0;
40803f96b79aSHugh Dickins }
40813f96b79aSHugh Dickins 
408224513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
408324513264SHugh Dickins {
408424513264SHugh Dickins }
408524513264SHugh Dickins 
4086c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4087c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4088c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4089c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4090c01d5b30SHugh Dickins {
4091c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4092c01d5b30SHugh Dickins }
4093c01d5b30SHugh Dickins #endif
4094c01d5b30SHugh Dickins 
409541ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
409694c1e62dSHugh Dickins {
409741ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
409894c1e62dSHugh Dickins }
409994c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
410094c1e62dSHugh Dickins 
4101853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
41020b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4103454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
41040b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
41050b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4106853ac43aSMatt Mackall 
4107853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4108853ac43aSMatt Mackall 
4109853ac43aSMatt Mackall /* common code */
41101da177e4SLinus Torvalds 
4111703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4112c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
41131da177e4SLinus Torvalds {
41141da177e4SLinus Torvalds 	struct inode *inode;
411593dec2daSAl Viro 	struct file *res;
41161da177e4SLinus Torvalds 
4117703321b6SMatthew Auld 	if (IS_ERR(mnt))
4118703321b6SMatthew Auld 		return ERR_CAST(mnt);
41191da177e4SLinus Torvalds 
4120285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
41211da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
41221da177e4SLinus Torvalds 
41231da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41241da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41251da177e4SLinus Torvalds 
412693dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
412793dec2daSAl Viro 				flags);
4128dac2d1f6SAl Viro 	if (unlikely(!inode)) {
4129dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
4130dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
4131dac2d1f6SAl Viro 	}
4132c7277090SEric Paris 	inode->i_flags |= i_flags;
41331da177e4SLinus Torvalds 	inode->i_size = size;
41346d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
413526567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
413693dec2daSAl Viro 	if (!IS_ERR(res))
413793dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
41384b42af81SAl Viro 				&shmem_file_operations);
41396b4d0b27SAl Viro 	if (IS_ERR(res))
414093dec2daSAl Viro 		iput(inode);
41416b4d0b27SAl Viro 	return res;
41421da177e4SLinus Torvalds }
4143c7277090SEric Paris 
4144c7277090SEric Paris /**
4145c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4146c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4147c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4148e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4149e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4150c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps
4151c7277090SEric Paris  * @size: size to be set for the file
4152c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4153c7277090SEric Paris  */
4154c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4155c7277090SEric Paris {
4156703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4157c7277090SEric Paris }
4158c7277090SEric Paris 
4159c7277090SEric Paris /**
4160c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4161c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps
4162c7277090SEric Paris  * @size: size to be set for the file
4163c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4164c7277090SEric Paris  */
4165c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4166c7277090SEric Paris {
4167703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4168c7277090SEric Paris }
4169395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
41701da177e4SLinus Torvalds 
417146711810SRandy Dunlap /**
4172703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4173703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4174703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps
4175703321b6SMatthew Auld  * @size: size to be set for the file
4176703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4177703321b6SMatthew Auld  */
4178703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4179703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4180703321b6SMatthew Auld {
4181703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4182703321b6SMatthew Auld }
4183703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4184703321b6SMatthew Auld 
4185703321b6SMatthew Auld /**
41861da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
41871da177e4SLinus Torvalds  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
41881da177e4SLinus Torvalds  */
41891da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41901da177e4SLinus Torvalds {
41911da177e4SLinus Torvalds 	struct file *file;
41921da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
41931da177e4SLinus Torvalds 
419466fc1303SHugh Dickins 	/*
4195c1e8d7c6SMichel Lespinasse 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
419666fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
419766fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
419866fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
419966fc1303SHugh Dickins 	 */
4200703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
42011da177e4SLinus Torvalds 	if (IS_ERR(file))
42021da177e4SLinus Torvalds 		return PTR_ERR(file);
42031da177e4SLinus Torvalds 
42041da177e4SLinus Torvalds 	if (vma->vm_file)
42051da177e4SLinus Torvalds 		fput(vma->vm_file);
42061da177e4SLinus Torvalds 	vma->vm_file = file;
42071da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4208f3f0e1d2SKirill A. Shutemov 
4209396bcc52SMatthew Wilcox (Oracle) 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4210f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4211f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4212f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4213f3f0e1d2SKirill A. Shutemov 	}
4214f3f0e1d2SKirill A. Shutemov 
42151da177e4SLinus Torvalds 	return 0;
42161da177e4SLinus Torvalds }
4217d9d90e5eSHugh Dickins 
4218d9d90e5eSHugh Dickins /**
4219d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4220d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4221d9d90e5eSHugh Dickins  * @index:	the page index
4222d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4223d9d90e5eSHugh Dickins  *
4224d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4225d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4226d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4227d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4228d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4229d9d90e5eSHugh Dickins  *
423068da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
423168da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4232d9d90e5eSHugh Dickins  */
4233d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4234d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4235d9d90e5eSHugh Dickins {
423668da9f05SHugh Dickins #ifdef CONFIG_SHMEM
423768da9f05SHugh Dickins 	struct inode *inode = mapping->host;
42389276aad6SHugh Dickins 	struct page *page;
423968da9f05SHugh Dickins 	int error;
424068da9f05SHugh Dickins 
424168da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
42429e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4243cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
424468da9f05SHugh Dickins 	if (error)
424568da9f05SHugh Dickins 		page = ERR_PTR(error);
424668da9f05SHugh Dickins 	else
424768da9f05SHugh Dickins 		unlock_page(page);
424868da9f05SHugh Dickins 	return page;
424968da9f05SHugh Dickins #else
425068da9f05SHugh Dickins 	/*
425168da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
425268da9f05SHugh Dickins 	 */
4253d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
425468da9f05SHugh Dickins #endif
4255d9d90e5eSHugh Dickins }
4256d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
4257