xref: /openbmc/linux/mm/shmem.c (revision b1cc94ab2f2ba31fcb2c59df0b9cf03f6d720553)
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on ramfs. It
 * extends ramfs with the ability to use swap and honor resource
 * limits, which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
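
/*
 * A quick illustration, assuming the common 4 KiB PAGE_SIZE:
 * BLOCKS_PER_PAGE is 8, matching the 512-byte units of inode->i_blocks,
 * and VM_ACCT() rounds a byte count up to whole pages:
 *
 *	VM_ACCT(1)    == 1
 *	VM_ACCT(4096) == 1
 *	VM_ACCT(4097) == 2
 */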

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};
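
/*
 * A sketch of that handshake, as the fields above suggest (illustrative,
 * inferred from the comments rather than shown in this excerpt):
 * shmem_fallocate points inode->i_private at a shmem_falloc and advances
 * next from start as pages are allocated; a fault into a hole that is
 * being punched waits on *waitq until the punch completes, and writepage
 * bumps nr_unswapped whenever it declines to swap out a page in range.
 */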

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
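
/*
 * Example, assuming 8 GiB of RAM (2M pages of 4 KiB) and no highmem: a
 * tmpfs mount with no size= option may use up to totalram_pages / 2 =
 * 1M pages (4 GiB), and nr_inodes likewise defaults to totalram_pages / 2.
 */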

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}
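
/*
 * In short (restating the two schemes described above): an object without
 * VM_NORESERVE (SysV shm, shared /dev/zero) is charged its whole size up
 * front by shmem_acct_size(), so shmem_acct_block() is then a no-op for
 * it; a tmpfs file carries VM_NORESERVE and is instead charged page by
 * page here, where a charge failure becomes -ENOSPC, not -ENOMEM.
 */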

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
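
/*
 * Worked example of the calculation above (illustrative numbers): with
 * info->alloced == 10, info->swapped == 2 and nrpages == 6, the mm has
 * dropped 10 - 2 - 6 == 2 hole pages behind our back, so 2 pages are
 * returned to used_blocks, subtracted from i_blocks (2 * BLOCKS_PER_PAGE)
 * and unaccounted from the memory commitment.
 */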

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	struct radix_tree_node *node;
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
	if (!item)
		return -ENOENT;
	if (item != expected)
		return -ENOENT;
	__radix_tree_replace(&mapping->page_tree, node, pslot,
			     replacement, NULL, NULL);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}
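
/*
 * Typical use, sketched from the swapin path (shmem_getpage_gfp, not
 * shown in this excerpt): after locking a page read back from swap,
 * re-check that the mapping still holds the same swap entry before
 * proceeding, e.g.
 *
 *	if (!shmem_confirm_swap(mapping, index, swap))
 *		error = -EEXIST;	(raced: already brought back)
 */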

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
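
/*
 * Example of setting these (standard tmpfs/THP interfaces, shown for
 * illustration):
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *	echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * The first selects SHMEM_HUGE_WITHIN_SIZE for that mount; the second
 * sets SHMEM_HUGE_DENY globally, overriding every mount's huge= option.
 */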

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
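
/*
 * Note on the walk above: shmem keeps a swapped-out page's swp_entry_t
 * in the radix tree as an exceptional entry, so counting exceptional
 * entries between start and end counts exactly the swapped pages, and
 * the result is then scaled from pages to bytes.
 */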

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial THP truncate due to 'start' falling
				 * in the middle of the THP: no need to look
				 * at these pages again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);
	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrinklist in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long found = -1;
	unsigned int checked = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		if (*slot == item) {
			found = iter.index;
			break;
		}
		checked++;
		if ((checked % 4096) != 0)
			continue;
		slot = radix_tree_iter_resume(slot, &iter);
		cond_resched_rcu();
	}

	rcu_read_unlock();
	return found;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = find_swap_entry(&mapping->page_tree, radswap);
	if (index == -1)
		return -EAGAIN;	/* tell shmem_unuse we found nothing */
11142e0e26c7SHugh Dickins 
11151b1b32f2SHugh Dickins 	/*
11161b1b32f2SHugh Dickins 	 * Move the list _head_ itself, so the next search starts from here.
11171f895f75SAl Viro 	 * But be careful: shmem_evict_inode checks list_empty without taking
11181b1b32f2SHugh Dickins 	 * mutex, and there's an instant in list_move_tail when info->swaplist
1119285b2c4fSHugh Dickins 	 * would appear empty, if it were the only one on shmem_swaplist.
11201b1b32f2SHugh Dickins 	 */
11211b1b32f2SHugh Dickins 	if (shmem_swaplist.next != &info->swaplist)
11222e0e26c7SHugh Dickins 		list_move_tail(&shmem_swaplist, &info->swaplist);
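	/*
	 * Editor's illustration of the head-move above: with circular list
	 * head H and inodes A, B, C, finding B and then doing
	 * list_move_tail(&H, &B.swaplist) reinserts H just before B:
	 *
	 *	before:  H -> A -> B -> C -> (H)
	 *	after:   H -> B -> C -> A -> (H)
	 *
	 * so the next walk from H tries B first, then C, then A.
	 */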
11232e0e26c7SHugh Dickins 
1124bde05d1cSHugh Dickins 	gfp = mapping_gfp_mask(mapping);
1125bde05d1cSHugh Dickins 	if (shmem_should_replace_page(*pagep, gfp)) {
1126bde05d1cSHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1127bde05d1cSHugh Dickins 		error = shmem_replace_page(pagep, gfp, info, index);
1128bde05d1cSHugh Dickins 		mutex_lock(&shmem_swaplist_mutex);
1129bde05d1cSHugh Dickins 		/*
1130bde05d1cSHugh Dickins 		 * We needed to drop mutex to make that restrictive page
11310142ef6cSHugh Dickins 		 * allocation, but the inode might have been freed while we
11320142ef6cSHugh Dickins 		 * dropped it: although a racing shmem_evict_inode() cannot
11330142ef6cSHugh Dickins 		 * complete without emptying the radix_tree, our page lock
11340142ef6cSHugh Dickins 		 * on this swapcache page is not enough to prevent that -
11350142ef6cSHugh Dickins 		 * free_swap_and_cache() of our swap entry will only
11360142ef6cSHugh Dickins 		 * trylock_page(), removing swap from radix_tree whatever.
11370142ef6cSHugh Dickins 		 *
11380142ef6cSHugh Dickins 		 * We must not proceed to shmem_add_to_page_cache() if the
11390142ef6cSHugh Dickins 		 * inode has been freed, but of course we cannot rely on
11400142ef6cSHugh Dickins 		 * inode or mapping or info to check that.  However, we can
11410142ef6cSHugh Dickins 		 * safely check if our swap entry is still in use (and here
11420142ef6cSHugh Dickins 		 * it can't have got reused for another page): if it's still
11430142ef6cSHugh Dickins 		 * in use, then the inode cannot have been freed yet, and we
11440142ef6cSHugh Dickins 		 * can safely proceed (if it's no longer in use, that tells
11450142ef6cSHugh Dickins 		 * nothing about the inode, but we don't need to unuse swap).
1146bde05d1cSHugh Dickins 		 */
1147bde05d1cSHugh Dickins 		if (!page_swapcount(*pagep))
1148bde05d1cSHugh Dickins 			error = -ENOENT;
1149bde05d1cSHugh Dickins 	}
1150bde05d1cSHugh Dickins 
1151d13d1443SKAMEZAWA Hiroyuki 	/*
1152778dd893SHugh Dickins 	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
1153778dd893SHugh Dickins 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
1154778dd893SHugh Dickins 	 * beneath us (pagelock doesn't help until the page is in pagecache).
1155d13d1443SKAMEZAWA Hiroyuki 	 */
1156bde05d1cSHugh Dickins 	if (!error)
1157bde05d1cSHugh Dickins 		error = shmem_add_to_page_cache(*pagep, mapping, index,
1158fed400a1SWang Sheng-Hui 						radswap);
115948f170fbSHugh Dickins 	if (error != -ENOMEM) {
116046f65ec1SHugh Dickins 		/*
116146f65ec1SHugh Dickins 		 * Truncation and eviction use free_swap_and_cache(), which
116246f65ec1SHugh Dickins 		 * only does trylock page: if we raced, best clean up here.
116346f65ec1SHugh Dickins 		 */
1164bde05d1cSHugh Dickins 		delete_from_swap_cache(*pagep);
1165bde05d1cSHugh Dickins 		set_page_dirty(*pagep);
116646f65ec1SHugh Dickins 		if (!error) {
11674595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1168285b2c4fSHugh Dickins 			info->swapped--;
11694595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
117041ffe5d5SHugh Dickins 			swap_free(swap);
117146f65ec1SHugh Dickins 		}
11721da177e4SLinus Torvalds 	}
11732e0e26c7SHugh Dickins 	return error;
11741da177e4SLinus Torvalds }
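/*
 * Editor's note, distilling the comment above: after any window in which
 * shmem_swaplist_mutex was dropped, the only safe liveness test is on the
 * swap entry itself,
 *
 *	if (!page_swapcount(*pagep))
 *		error = -ENOENT;	inode may already be gone
 *
 * never a dereference of inode, mapping or info.
 */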
11751da177e4SLinus Torvalds 
11761da177e4SLinus Torvalds /*
117746f65ec1SHugh Dickins  * Search through swapped inodes to find and replace swap by page.
11781da177e4SLinus Torvalds  */
117941ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
11801da177e4SLinus Torvalds {
118141ffe5d5SHugh Dickins 	struct list_head *this, *next;
11821da177e4SLinus Torvalds 	struct shmem_inode_info *info;
118300501b53SJohannes Weiner 	struct mem_cgroup *memcg;
1184bde05d1cSHugh Dickins 	int error = 0;
1185bde05d1cSHugh Dickins 
1186bde05d1cSHugh Dickins 	/*
1187bde05d1cSHugh Dickins 	 * There's a faint possibility that the swap page was replaced before
11880142ef6cSHugh Dickins 	 * caller locked it: caller will come back later with the right page.
1189bde05d1cSHugh Dickins 	 */
11900142ef6cSHugh Dickins 	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
1191bde05d1cSHugh Dickins 		goto out;
1192778dd893SHugh Dickins 
1193778dd893SHugh Dickins 	/*
1194778dd893SHugh Dickins 	 * Charge page using GFP_KERNEL while we can wait, before taking
1195778dd893SHugh Dickins 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
1196778dd893SHugh Dickins 	 * Charged back to the user (not to caller) when swap account is used.
1197778dd893SHugh Dickins 	 */
1198f627c2f5SKirill A. Shutemov 	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
1199f627c2f5SKirill A. Shutemov 			false);
1200778dd893SHugh Dickins 	if (error)
1201778dd893SHugh Dickins 		goto out;
120246f65ec1SHugh Dickins 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
120300501b53SJohannes Weiner 	error = -EAGAIN;
12041da177e4SLinus Torvalds 
1205cb5f7b9aSHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
120641ffe5d5SHugh Dickins 	list_for_each_safe(this, next, &shmem_swaplist) {
120741ffe5d5SHugh Dickins 		info = list_entry(this, struct shmem_inode_info, swaplist);
1208285b2c4fSHugh Dickins 		if (info->swapped)
120900501b53SJohannes Weiner 			error = shmem_unuse_inode(info, swap, &page);
12106922c0c7SHugh Dickins 		else
12116922c0c7SHugh Dickins 			list_del_init(&info->swaplist);
1212cb5f7b9aSHugh Dickins 		cond_resched();
121300501b53SJohannes Weiner 		if (error != -EAGAIN)
1214778dd893SHugh Dickins 			break;
121500501b53SJohannes Weiner 		/* found nothing in this: move on to search the next */
12161da177e4SLinus Torvalds 	}
1217cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1218778dd893SHugh Dickins 
121900501b53SJohannes Weiner 	if (error) {
122000501b53SJohannes Weiner 		if (error != -ENOMEM)
122100501b53SJohannes Weiner 			error = 0;
1222f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(page, memcg, false);
122300501b53SJohannes Weiner 	} else
1224f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, true, false);
1225778dd893SHugh Dickins out:
1226aaa46865SHugh Dickins 	unlock_page(page);
122709cbfeafSKirill A. Shutemov 	put_page(page);
1228778dd893SHugh Dickins 	return error;
12291da177e4SLinus Torvalds }
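/*
 * For context (editor's sketch): shmem_unuse() is driven from swapoff's
 * try_to_unuse() in mm/swapfile.c, which for each still-used entry on the
 * departing swap device does, very roughly:
 *
 *	page = read_swap_cache_async(entry, ...);	bring the page in
 *	lock_page(page);
 *	shmem_unuse(entry, page);	move it back into shmem filecache
 *
 * shmem_unuse() consumes the page lock and reference: note the
 * unlock_page()/put_page() on every path out above.
 */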
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds /*
12321da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
12331da177e4SLinus Torvalds  */
12341da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
12351da177e4SLinus Torvalds {
12361da177e4SLinus Torvalds 	struct shmem_inode_info *info;
12371da177e4SLinus Torvalds 	struct address_space *mapping;
12381da177e4SLinus Torvalds 	struct inode *inode;
12396922c0c7SHugh Dickins 	swp_entry_t swap;
12406922c0c7SHugh Dickins 	pgoff_t index;
12411da177e4SLinus Torvalds 
1242800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
12431da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
12441da177e4SLinus Torvalds 	mapping = page->mapping;
12451da177e4SLinus Torvalds 	index = page->index;
12461da177e4SLinus Torvalds 	inode = mapping->host;
12471da177e4SLinus Torvalds 	info = SHMEM_I(inode);
12481da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
12491da177e4SLinus Torvalds 		goto redirty;
1250d9fe526aSHugh Dickins 	if (!total_swap_pages)
12511da177e4SLinus Torvalds 		goto redirty;
12521da177e4SLinus Torvalds 
1253d9fe526aSHugh Dickins 	/*
125497b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
125597b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
125697b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
125797b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
125897b713baSChristoph Hellwig 	 * threads or sync.
1259d9fe526aSHugh Dickins 	 */
126048f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
126148f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
126248f170fbSHugh Dickins 		goto redirty;
126348f170fbSHugh Dickins 	}
12641635f6a7SHugh Dickins 
12651635f6a7SHugh Dickins 	/*
12661635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
12671635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
12681635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
12691aac1400SHugh Dickins 	 *
12701aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
12711aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
12721aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
12731aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
12741aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when its
12741aac1400SHugh Dickins 	 * nr_unswapped count of such reactivations grows too large.
12751635f6a7SHugh Dickins 	 */
12761635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
12771aac1400SHugh Dickins 		if (inode->i_private) {
12781aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
12791aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
12801aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
12811aac1400SHugh Dickins 			if (shmem_falloc &&
12828e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
12831aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
12841aac1400SHugh Dickins 			    index < shmem_falloc->next)
12851aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
12861aac1400SHugh Dickins 			else
12871aac1400SHugh Dickins 				shmem_falloc = NULL;
12881aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
12891aac1400SHugh Dickins 			if (shmem_falloc)
12901aac1400SHugh Dickins 				goto redirty;
12911aac1400SHugh Dickins 		}
12921635f6a7SHugh Dickins 		clear_highpage(page);
12931635f6a7SHugh Dickins 		flush_dcache_page(page);
12941635f6a7SHugh Dickins 		SetPageUptodate(page);
12951635f6a7SHugh Dickins 	}
12961635f6a7SHugh Dickins 
129738d8b4e6SHuang Ying 	swap = get_swap_page(page);
129848f170fbSHugh Dickins 	if (!swap.val)
129948f170fbSHugh Dickins 		goto redirty;
1300d9fe526aSHugh Dickins 
130137e84351SVladimir Davydov 	if (mem_cgroup_try_charge_swap(page, swap))
130237e84351SVladimir Davydov 		goto free_swap;
130337e84351SVladimir Davydov 
1304b1dea800SHugh Dickins 	/*
1305b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
13066922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
13076922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1308b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
13096922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
13106922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1311b1dea800SHugh Dickins 	 */
1312b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
131305bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
131405bf86b4SHugh Dickins 		list_add_tail(&info->swaplist, &shmem_swaplist);
1315b1dea800SHugh Dickins 
131648f170fbSHugh Dickins 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
13174595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1318267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1319267a4c76SHugh Dickins 		info->swapped++;
13204595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1321267a4c76SHugh Dickins 
1322aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
13236922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
13246922c0c7SHugh Dickins 
13256922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1326d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
13279fab5619SHugh Dickins 		swap_writepage(page, wbc);
13281da177e4SLinus Torvalds 		return 0;
13291da177e4SLinus Torvalds 	}
13301da177e4SLinus Torvalds 
13316922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
133237e84351SVladimir Davydov free_swap:
133375f6d6d2SMinchan Kim 	put_swap_page(page, swap);
13341da177e4SLinus Torvalds redirty:
13351da177e4SLinus Torvalds 	set_page_dirty(page);
1336d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1337d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1338d9fe526aSHugh Dickins 	unlock_page(page);
1339d9fe526aSHugh Dickins 	return 0;
13401da177e4SLinus Torvalds }
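/*
 * For context (editor's sketch of mm/vmscan.c): the expected caller is
 * vmscan's pageout(), which invokes ->writepage() along the lines of:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_NONE,
 *		.nr_to_write = SWAP_CLUSTER_MAX,
 *		.for_reclaim = 1,
 *	};
 *	res = mapping->a_ops->writepage(page, &wbc);
 *
 * hence the WARN_ON_ONCE() above when !wbc->for_reclaim.
 */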
13411da177e4SLinus Torvalds 
134275edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
134371fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1344680d794bSakpm@linux-foundation.org {
1345680d794bSakpm@linux-foundation.org 	char buffer[64];
1346680d794bSakpm@linux-foundation.org 
134771fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1348095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1349095f1fc4SLee Schermerhorn 
1350a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1351095f1fc4SLee Schermerhorn 
1352095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1353680d794bSakpm@linux-foundation.org }
135471fe804bSLee Schermerhorn 
135571fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
135671fe804bSLee Schermerhorn {
135771fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
135871fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
135971fe804bSLee Schermerhorn 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
136071fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
136171fe804bSLee Schermerhorn 		mpol_get(mpol);
136271fe804bSLee Schermerhorn 		spin_unlock(&sbinfo->stat_lock);
136371fe804bSLee Schermerhorn 	}
136471fe804bSLee Schermerhorn 	return mpol;
136571fe804bSLee Schermerhorn }
136675edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
136775edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
136875edd345SHugh Dickins {
136975edd345SHugh Dickins }
137075edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
137175edd345SHugh Dickins {
137275edd345SHugh Dickins 	return NULL;
137375edd345SHugh Dickins }
137475edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
137575edd345SHugh Dickins #ifndef CONFIG_NUMA
137675edd345SHugh Dickins #define vm_policy vm_private_data
137775edd345SHugh Dickins #endif
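/*
 * Editor's note: without CONFIG_NUMA, struct vm_area_struct has no
 * vm_policy member, so the pseudo-vma helpers below borrow the otherwise
 * unused vm_private_data slot via the #define above; the stubs keep the
 * mempolicy show/get callers unconditional.
 */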
1378680d794bSakpm@linux-foundation.org 
1379800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1380800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1381800d8c63SKirill A. Shutemov {
1382800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
1383800d8c63SKirill A. Shutemov 	vma->vm_start = 0;
1384800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1385800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1386800d8c63SKirill A. Shutemov 	vma->vm_ops = NULL;
1387800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1388800d8c63SKirill A. Shutemov }
1389800d8c63SKirill A. Shutemov 
1390800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1391800d8c63SKirill A. Shutemov {
1392800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1393800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1394800d8c63SKirill A. Shutemov }
1395800d8c63SKirill A. Shutemov 
139641ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
139741ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
13981da177e4SLinus Torvalds {
13991da177e4SLinus Torvalds 	struct vm_area_struct pvma;
140018a2f371SMel Gorman 	struct page *page;
14011da177e4SLinus Torvalds 
1402800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
140318a2f371SMel Gorman 	page = swapin_readahead(swap, gfp, &pvma, 0);
1404800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
140518a2f371SMel Gorman 
1406800d8c63SKirill A. Shutemov 	return page;
1407800d8c63SKirill A. Shutemov }
140818a2f371SMel Gorman 
1409800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1410800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1411800d8c63SKirill A. Shutemov {
1412800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
1413800d8c63SKirill A. Shutemov 	struct inode *inode = &info->vfs_inode;
1414800d8c63SKirill A. Shutemov 	struct address_space *mapping = inode->i_mapping;
14154620a06eSGeert Uytterhoeven 	pgoff_t idx, hindex;
1416800d8c63SKirill A. Shutemov 	void __rcu **results;
1417800d8c63SKirill A. Shutemov 	struct page *page;
1418800d8c63SKirill A. Shutemov 
1419e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1420800d8c63SKirill A. Shutemov 		return NULL;
1421800d8c63SKirill A. Shutemov 
14224620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
1423800d8c63SKirill A. Shutemov 	rcu_read_lock();
1424800d8c63SKirill A. Shutemov 	if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
1425800d8c63SKirill A. Shutemov 				hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1426800d8c63SKirill A. Shutemov 		rcu_read_unlock();
1427800d8c63SKirill A. Shutemov 		return NULL;
1428800d8c63SKirill A. Shutemov 	}
1429800d8c63SKirill A. Shutemov 	rcu_read_unlock();
1430800d8c63SKirill A. Shutemov 
1431800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1432800d8c63SKirill A. Shutemov 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1433800d8c63SKirill A. Shutemov 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1434800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1435800d8c63SKirill A. Shutemov 	if (page)
1436800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
143718a2f371SMel Gorman 	return page;
143818a2f371SMel Gorman }
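/*
 * Editor's worked example: with 4KiB pages on x86-64, HPAGE_PMD_NR is 512,
 * so for index 1000 above:
 *
 *	hindex = round_down(1000, 512) = 512
 *
 * and the gang lookup must find no existing entry in [512, 1024) before a
 * huge page may be installed covering that whole range.
 */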
143918a2f371SMel Gorman 
144018a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
144118a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
144218a2f371SMel Gorman {
144318a2f371SMel Gorman 	struct vm_area_struct pvma;
144418a2f371SMel Gorman 	struct page *page;
144518a2f371SMel Gorman 
1446800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1447800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1448800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
144918a2f371SMel Gorman 
1450800d8c63SKirill A. Shutemov 	return page;
1451800d8c63SKirill A. Shutemov }
1452800d8c63SKirill A. Shutemov 
1453800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1454800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
1455800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1456800d8c63SKirill A. Shutemov {
1457800d8c63SKirill A. Shutemov 	struct page *page;
1458800d8c63SKirill A. Shutemov 	int nr;
1459800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1460800d8c63SKirill A. Shutemov 
1461e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1462800d8c63SKirill A. Shutemov 		huge = false;
1463800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1464800d8c63SKirill A. Shutemov 
1465800d8c63SKirill A. Shutemov 	if (shmem_acct_block(info->flags, nr))
1466800d8c63SKirill A. Shutemov 		goto failed;
1467800d8c63SKirill A. Shutemov 	if (sbinfo->max_blocks) {
1468800d8c63SKirill A. Shutemov 		if (percpu_counter_compare(&sbinfo->used_blocks,
1469800d8c63SKirill A. Shutemov 					sbinfo->max_blocks - nr) > 0)
1470800d8c63SKirill A. Shutemov 			goto unacct;
1471800d8c63SKirill A. Shutemov 		percpu_counter_add(&sbinfo->used_blocks, nr);
1472800d8c63SKirill A. Shutemov 	}
1473800d8c63SKirill A. Shutemov 
1474800d8c63SKirill A. Shutemov 	if (huge)
1475800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1476800d8c63SKirill A. Shutemov 	else
1477800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
147875edd345SHugh Dickins 	if (page) {
147975edd345SHugh Dickins 		__SetPageLocked(page);
148075edd345SHugh Dickins 		__SetPageSwapBacked(page);
1481800d8c63SKirill A. Shutemov 		return page;
148275edd345SHugh Dickins 	}
148318a2f371SMel Gorman 
1484800d8c63SKirill A. Shutemov 	err = -ENOMEM;
1485800d8c63SKirill A. Shutemov 	if (sbinfo->max_blocks)
1486800d8c63SKirill A. Shutemov 		percpu_counter_add(&sbinfo->used_blocks, -nr);
1487800d8c63SKirill A. Shutemov unacct:
1488800d8c63SKirill A. Shutemov 	shmem_unacct_blocks(info->flags, nr);
1489800d8c63SKirill A. Shutemov failed:
1490800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
14911da177e4SLinus Torvalds }
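/*
 * Editor's worked example of the -ENOSPC check above: a huge allocation
 * charges nr = HPAGE_PMD_NR = 512 blocks at once (4KiB pages assumed).
 * With max_blocks = 1000 and used_blocks = 990:
 *
 *	percpu_counter_compare(&used_blocks, 1000 - 512)  >  0
 *
 * so the request fails with -ENOSPC even though 10 small blocks remain;
 * the caller in shmem_getpage_gfp() then retries with huge == false.
 */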
149271fe804bSLee Schermerhorn 
14931da177e4SLinus Torvalds /*
1494bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1495bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1496bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1497bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1498bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1499bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1500bde05d1cSHugh Dickins  *
1501bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1502bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1503bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1504bde05d1cSHugh Dickins  */
1505bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1506bde05d1cSHugh Dickins {
1507bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1508bde05d1cSHugh Dickins }
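/*
 * Editor's example: suppose gma500 maps a shmem object with a GFP mask
 * limited to ZONE_DMA32, but swapin placed the page in ZONE_NORMAL.
 * Then page_zonenum(page) > gfp_zone(gfp), and the page must be copied
 * down by shmem_replace_page() below.  (Exact zone numbering depends on
 * the architecture and config.)
 */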
1509bde05d1cSHugh Dickins 
1510bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1511bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1512bde05d1cSHugh Dickins {
1513bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1514bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1515bde05d1cSHugh Dickins 	pgoff_t swap_index;
1516bde05d1cSHugh Dickins 	int error;
1517bde05d1cSHugh Dickins 
1518bde05d1cSHugh Dickins 	oldpage = *pagep;
1519bde05d1cSHugh Dickins 	swap_index = page_private(oldpage);
1520bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1521bde05d1cSHugh Dickins 
1522bde05d1cSHugh Dickins 	/*
1523bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1524bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1525bde05d1cSHugh Dickins 	 */
1526bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1527bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1528bde05d1cSHugh Dickins 	if (!newpage)
1529bde05d1cSHugh Dickins 		return -ENOMEM;
1530bde05d1cSHugh Dickins 
153109cbfeafSKirill A. Shutemov 	get_page(newpage);
1532bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
15330142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1534bde05d1cSHugh Dickins 
15359956edf3SHugh Dickins 	__SetPageLocked(newpage);
15369956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1537bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1538bde05d1cSHugh Dickins 	set_page_private(newpage, swap_index);
1539bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1540bde05d1cSHugh Dickins 
1541bde05d1cSHugh Dickins 	/*
1542bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1543bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1544bde05d1cSHugh Dickins 	 */
1545bde05d1cSHugh Dickins 	spin_lock_irq(&swap_mapping->tree_lock);
1546bde05d1cSHugh Dickins 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1547bde05d1cSHugh Dickins 								   newpage);
15480142ef6cSHugh Dickins 	if (!error) {
154911fb9989SMel Gorman 		__inc_node_page_state(newpage, NR_FILE_PAGES);
155011fb9989SMel Gorman 		__dec_node_page_state(oldpage, NR_FILE_PAGES);
15510142ef6cSHugh Dickins 	}
1552bde05d1cSHugh Dickins 	spin_unlock_irq(&swap_mapping->tree_lock);
1553bde05d1cSHugh Dickins 
15540142ef6cSHugh Dickins 	if (unlikely(error)) {
15550142ef6cSHugh Dickins 		/*
15560142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
15570142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
15580142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
15590142ef6cSHugh Dickins 		 */
15600142ef6cSHugh Dickins 		oldpage = newpage;
15610142ef6cSHugh Dickins 	} else {
15626a93ca8fSJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
1563bde05d1cSHugh Dickins 		lru_cache_add_anon(newpage);
15640142ef6cSHugh Dickins 		*pagep = newpage;
15650142ef6cSHugh Dickins 	}
1566bde05d1cSHugh Dickins 
1567bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1568bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1569bde05d1cSHugh Dickins 
1570bde05d1cSHugh Dickins 	unlock_page(oldpage);
157109cbfeafSKirill A. Shutemov 	put_page(oldpage);
157209cbfeafSKirill A. Shutemov 	put_page(oldpage);
15730142ef6cSHugh Dickins 	return error;
1574bde05d1cSHugh Dickins }
1575bde05d1cSHugh Dickins 
1576bde05d1cSHugh Dickins /*
157768da9f05SHugh Dickins  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
15781da177e4SLinus Torvalds  *
15791da177e4SLinus Torvalds  * If we allocate a new one we do not mark it dirty. That's up to the
15801da177e4SLinus Torvalds  * vm. If we swap it in we mark it dirty, since we also free the swap
15819e18eb29SAndres Lagar-Cavilla  * entry: a page cannot live in both the swap and page cache.
15829e18eb29SAndres Lagar-Cavilla  *
15839e18eb29SAndres Lagar-Cavilla  * vma, vmf and fault_type are only supplied by shmem_fault:
15849e18eb29SAndres Lagar-Cavilla  * otherwise they are NULL.
15851da177e4SLinus Torvalds  */
158641ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
15879e18eb29SAndres Lagar-Cavilla 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1588cfda0526SMike Rapoport 	struct vm_area_struct *vma, struct vm_fault *vmf, int *fault_type)
15891da177e4SLinus Torvalds {
15901da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
159123f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
15921da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo;
15939e18eb29SAndres Lagar-Cavilla 	struct mm_struct *charge_mm;
159400501b53SJohannes Weiner 	struct mem_cgroup *memcg;
159527ab7006SHugh Dickins 	struct page *page;
15961da177e4SLinus Torvalds 	swp_entry_t swap;
1597657e3038SKirill A. Shutemov 	enum sgp_type sgp_huge = sgp;
1598800d8c63SKirill A. Shutemov 	pgoff_t hindex = index;
15991da177e4SLinus Torvalds 	int error;
160054af6042SHugh Dickins 	int once = 0;
16011635f6a7SHugh Dickins 	int alloced = 0;
16021da177e4SLinus Torvalds 
160309cbfeafSKirill A. Shutemov 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
16041da177e4SLinus Torvalds 		return -EFBIG;
1605657e3038SKirill A. Shutemov 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1606657e3038SKirill A. Shutemov 		sgp = SGP_CACHE;
16071da177e4SLinus Torvalds repeat:
160854af6042SHugh Dickins 	swap.val = 0;
16090cd6144aSJohannes Weiner 	page = find_lock_entry(mapping, index);
161054af6042SHugh Dickins 	if (radix_tree_exceptional_entry(page)) {
161154af6042SHugh Dickins 		swap = radix_to_swp_entry(page);
161254af6042SHugh Dickins 		page = NULL;
161354af6042SHugh Dickins 	}
161454af6042SHugh Dickins 
161575edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
161609cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
161754af6042SHugh Dickins 		error = -EINVAL;
1618267a4c76SHugh Dickins 		goto unlock;
161954af6042SHugh Dickins 	}
162054af6042SHugh Dickins 
162166d2f4d2SHugh Dickins 	if (page && sgp == SGP_WRITE)
162266d2f4d2SHugh Dickins 		mark_page_accessed(page);
162366d2f4d2SHugh Dickins 
16241635f6a7SHugh Dickins 	/* fallocated page? */
16251635f6a7SHugh Dickins 	if (page && !PageUptodate(page)) {
16261635f6a7SHugh Dickins 		if (sgp != SGP_READ)
16271635f6a7SHugh Dickins 			goto clear;
16281635f6a7SHugh Dickins 		unlock_page(page);
162909cbfeafSKirill A. Shutemov 		put_page(page);
16301635f6a7SHugh Dickins 		page = NULL;
16311635f6a7SHugh Dickins 	}
163254af6042SHugh Dickins 	if (page || (sgp == SGP_READ && !swap.val)) {
163354af6042SHugh Dickins 		*pagep = page;
163454af6042SHugh Dickins 		return 0;
163527ab7006SHugh Dickins 	}
163627ab7006SHugh Dickins 
1637b409f9fcSHugh Dickins 	/*
163854af6042SHugh Dickins 	 * Fast cache lookup did not find it:
163954af6042SHugh Dickins 	 * bring it back from swap or allocate.
1640b409f9fcSHugh Dickins 	 */
164154af6042SHugh Dickins 	sbinfo = SHMEM_SB(inode->i_sb);
1642cfda0526SMike Rapoport 	charge_mm = vma ? vma->vm_mm : current->mm;
164327ab7006SHugh Dickins 
16441da177e4SLinus Torvalds 	if (swap.val) {
16451da177e4SLinus Torvalds 		/* Look it up and read it in.. */
164627ab7006SHugh Dickins 		page = lookup_swap_cache(swap);
164727ab7006SHugh Dickins 		if (!page) {
16489e18eb29SAndres Lagar-Cavilla 			/* Or update major stats only when swapin succeeds?? */
16499e18eb29SAndres Lagar-Cavilla 			if (fault_type) {
165068da9f05SHugh Dickins 				*fault_type |= VM_FAULT_MAJOR;
16519e18eb29SAndres Lagar-Cavilla 				count_vm_event(PGMAJFAULT);
16522262185cSRoman Gushchin 				count_memcg_event_mm(charge_mm, PGMAJFAULT);
16539e18eb29SAndres Lagar-Cavilla 			}
16549e18eb29SAndres Lagar-Cavilla 			/* Here we actually start the I/O */
165541ffe5d5SHugh Dickins 			page = shmem_swapin(swap, gfp, info, index);
165627ab7006SHugh Dickins 			if (!page) {
16571da177e4SLinus Torvalds 				error = -ENOMEM;
165854af6042SHugh Dickins 				goto failed;
1659285b2c4fSHugh Dickins 			}
16601da177e4SLinus Torvalds 		}
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 		/* We have to do this with page locked to prevent races */
166354af6042SHugh Dickins 		lock_page(page);
16640142ef6cSHugh Dickins 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
1665d1899228SHugh Dickins 		    !shmem_confirm_swap(mapping, index, swap)) {
1666bde05d1cSHugh Dickins 			error = -EEXIST;	/* try again */
1667d1899228SHugh Dickins 			goto unlock;
1668bde05d1cSHugh Dickins 		}
166927ab7006SHugh Dickins 		if (!PageUptodate(page)) {
16701da177e4SLinus Torvalds 			error = -EIO;
167154af6042SHugh Dickins 			goto failed;
167254af6042SHugh Dickins 		}
167354af6042SHugh Dickins 		wait_on_page_writeback(page);
167454af6042SHugh Dickins 
1675bde05d1cSHugh Dickins 		if (shmem_should_replace_page(page, gfp)) {
1676bde05d1cSHugh Dickins 			error = shmem_replace_page(&page, gfp, info, index);
1677bde05d1cSHugh Dickins 			if (error)
167854af6042SHugh Dickins 				goto failed;
16791da177e4SLinus Torvalds 		}
16801da177e4SLinus Torvalds 
16819e18eb29SAndres Lagar-Cavilla 		error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1682f627c2f5SKirill A. Shutemov 				false);
1683d1899228SHugh Dickins 		if (!error) {
168454af6042SHugh Dickins 			error = shmem_add_to_page_cache(page, mapping, index,
1685fed400a1SWang Sheng-Hui 						swp_to_radix_entry(swap));
1686215c02bcSHugh Dickins 			/*
1687215c02bcSHugh Dickins 			 * We already confirmed swap under page lock, and make
1688215c02bcSHugh Dickins 			 * no memory allocation here, so usually no possibility
1689215c02bcSHugh Dickins 			 * of error; but free_swap_and_cache() only trylocks a
1690215c02bcSHugh Dickins 			 * page, so it is just possible that the entry has been
1691215c02bcSHugh Dickins 			 * truncated or holepunched since swap was confirmed.
1692215c02bcSHugh Dickins 			 * shmem_undo_range() will have done some of the
1693215c02bcSHugh Dickins 			 * unaccounting, now delete_from_swap_cache() will do
169493aa7d95SVladimir Davydov 			 * the rest.
1695215c02bcSHugh Dickins 			 * Reset swap.val? No, leave it so "failed" goes back to
1696215c02bcSHugh Dickins 			 * "repeat": reading a hole and writing should succeed.
1697215c02bcSHugh Dickins 			 */
169800501b53SJohannes Weiner 			if (error) {
1699f627c2f5SKirill A. Shutemov 				mem_cgroup_cancel_charge(page, memcg, false);
1700215c02bcSHugh Dickins 				delete_from_swap_cache(page);
1701d1899228SHugh Dickins 			}
170200501b53SJohannes Weiner 		}
170354af6042SHugh Dickins 		if (error)
170454af6042SHugh Dickins 			goto failed;
170554af6042SHugh Dickins 
1706f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, true, false);
170700501b53SJohannes Weiner 
17084595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
170954af6042SHugh Dickins 		info->swapped--;
171054af6042SHugh Dickins 		shmem_recalc_inode(inode);
17114595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
171227ab7006SHugh Dickins 
171366d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
171466d2f4d2SHugh Dickins 			mark_page_accessed(page);
171566d2f4d2SHugh Dickins 
171627ab7006SHugh Dickins 		delete_from_swap_cache(page);
171727ab7006SHugh Dickins 		set_page_dirty(page);
171827ab7006SHugh Dickins 		swap_free(swap);
171927ab7006SHugh Dickins 
172054af6042SHugh Dickins 	} else {
1721cfda0526SMike Rapoport 		if (vma && userfaultfd_missing(vma)) {
1722cfda0526SMike Rapoport 			*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1723cfda0526SMike Rapoport 			return 0;
1724cfda0526SMike Rapoport 		}
1725cfda0526SMike Rapoport 
1726800d8c63SKirill A. Shutemov 		/* shmem_symlink() */
1727800d8c63SKirill A. Shutemov 		if (mapping->a_ops != &shmem_aops)
1728800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1729657e3038SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1730800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1731800d8c63SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_FORCE)
1732800d8c63SKirill A. Shutemov 			goto alloc_huge;
1733800d8c63SKirill A. Shutemov 		switch (sbinfo->huge) {
1734800d8c63SKirill A. Shutemov 			loff_t i_size;
1735800d8c63SKirill A. Shutemov 			pgoff_t off;
1736800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
1737800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1738800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
1739800d8c63SKirill A. Shutemov 			off = round_up(index, HPAGE_PMD_NR);
1740800d8c63SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
1741800d8c63SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
1742800d8c63SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
1743800d8c63SKirill A. Shutemov 				goto alloc_huge;
1744800d8c63SKirill A. Shutemov 			/* fallthrough */
1745800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
1746657e3038SKirill A. Shutemov 			if (sgp_huge == SGP_HUGE)
1747657e3038SKirill A. Shutemov 				goto alloc_huge;
1748657e3038SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
1749800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
175059a16eadSHugh Dickins 		}
17511da177e4SLinus Torvalds 
1752800d8c63SKirill A. Shutemov alloc_huge:
1753800d8c63SKirill A. Shutemov 		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1754800d8c63SKirill A. Shutemov 				index, true);
1755800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
1756800d8c63SKirill A. Shutemov alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1757800d8c63SKirill A. Shutemov 					index, false);
175854af6042SHugh Dickins 		}
1759800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
1760779750d2SKirill A. Shutemov 			int retry = 5;
1761800d8c63SKirill A. Shutemov 			error = PTR_ERR(page);
1762800d8c63SKirill A. Shutemov 			page = NULL;
1763779750d2SKirill A. Shutemov 			if (error != -ENOSPC)
1764779750d2SKirill A. Shutemov 				goto failed;
1765779750d2SKirill A. Shutemov 			/*
1766779750d2SKirill A. Shutemov 			 * Try to reclaim some space by splitting a huge page
1767779750d2SKirill A. Shutemov 			 * beyond i_size on the filesystem.
1768779750d2SKirill A. Shutemov 			 */
1769779750d2SKirill A. Shutemov 			while (retry--) {
1770779750d2SKirill A. Shutemov 				int ret;
1771779750d2SKirill A. Shutemov 				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1772779750d2SKirill A. Shutemov 				if (ret == SHRINK_STOP)
1773779750d2SKirill A. Shutemov 					break;
1774779750d2SKirill A. Shutemov 				if (ret)
1775779750d2SKirill A. Shutemov 					goto alloc_nohuge;
1776779750d2SKirill A. Shutemov 			}
1777800d8c63SKirill A. Shutemov 			goto failed;
1778800d8c63SKirill A. Shutemov 		}
1779800d8c63SKirill A. Shutemov 
1780800d8c63SKirill A. Shutemov 		if (PageTransHuge(page))
1781800d8c63SKirill A. Shutemov 			hindex = round_down(index, HPAGE_PMD_NR);
1782800d8c63SKirill A. Shutemov 		else
1783800d8c63SKirill A. Shutemov 			hindex = index;
1784800d8c63SKirill A. Shutemov 
178566d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
1786eb39d618SHugh Dickins 			__SetPageReferenced(page);
178766d2f4d2SHugh Dickins 
17889e18eb29SAndres Lagar-Cavilla 		error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1789800d8c63SKirill A. Shutemov 				PageTransHuge(page));
179054af6042SHugh Dickins 		if (error)
1791800d8c63SKirill A. Shutemov 			goto unacct;
1792800d8c63SKirill A. Shutemov 		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1793800d8c63SKirill A. Shutemov 				compound_order(page));
1794b065b432SHugh Dickins 		if (!error) {
1795800d8c63SKirill A. Shutemov 			error = shmem_add_to_page_cache(page, mapping, hindex,
1796fed400a1SWang Sheng-Hui 							NULL);
1797b065b432SHugh Dickins 			radix_tree_preload_end();
1798b065b432SHugh Dickins 		}
1799b065b432SHugh Dickins 		if (error) {
1800800d8c63SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg,
1801800d8c63SKirill A. Shutemov 					PageTransHuge(page));
1802800d8c63SKirill A. Shutemov 			goto unacct;
1803b065b432SHugh Dickins 		}
1804800d8c63SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false,
1805800d8c63SKirill A. Shutemov 				PageTransHuge(page));
180654af6042SHugh Dickins 		lru_cache_add_anon(page);
180754af6042SHugh Dickins 
18084595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1809800d8c63SKirill A. Shutemov 		info->alloced += 1 << compound_order(page);
1810800d8c63SKirill A. Shutemov 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
181154af6042SHugh Dickins 		shmem_recalc_inode(inode);
18124595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
18131635f6a7SHugh Dickins 		alloced = true;
181454af6042SHugh Dickins 
1815779750d2SKirill A. Shutemov 		if (PageTransHuge(page) &&
1816779750d2SKirill A. Shutemov 				DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1817779750d2SKirill A. Shutemov 				hindex + HPAGE_PMD_NR - 1) {
1818779750d2SKirill A. Shutemov 			/*
1819779750d2SKirill A. Shutemov 			 * Part of the huge page is beyond i_size: subject
1820779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1821779750d2SKirill A. Shutemov 			 */
1822779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1823d041353dSCong Wang 			/*
1824d041353dSCong Wang 			 * list_empty_careful() defends against unlocked
1825d041353dSCong Wang 			 * access to ->shrinklist in shmem_unused_huge_shrink()
1826d041353dSCong Wang 			 */
1827d041353dSCong Wang 			if (list_empty_careful(&info->shrinklist)) {
1828779750d2SKirill A. Shutemov 				list_add_tail(&info->shrinklist,
1829779750d2SKirill A. Shutemov 						&sbinfo->shrinklist);
1830779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len++;
1831779750d2SKirill A. Shutemov 			}
1832779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1833779750d2SKirill A. Shutemov 		}
1834779750d2SKirill A. Shutemov 
1835ec9516fbSHugh Dickins 		/*
18361635f6a7SHugh Dickins 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
18371635f6a7SHugh Dickins 		 */
18381635f6a7SHugh Dickins 		if (sgp == SGP_FALLOC)
18391635f6a7SHugh Dickins 			sgp = SGP_WRITE;
18401635f6a7SHugh Dickins clear:
18411635f6a7SHugh Dickins 		/*
18421635f6a7SHugh Dickins 		 * Let SGP_WRITE caller clear ends if write does not fill page;
18431635f6a7SHugh Dickins 		 * but SGP_FALLOC on a page fallocated earlier must initialize
18441635f6a7SHugh Dickins 		 * it now, lest undo on failure cancel our earlier guarantee.
1845ec9516fbSHugh Dickins 		 */
1846800d8c63SKirill A. Shutemov 		if (sgp != SGP_WRITE && !PageUptodate(page)) {
1847800d8c63SKirill A. Shutemov 			struct page *head = compound_head(page);
1848800d8c63SKirill A. Shutemov 			int i;
1849800d8c63SKirill A. Shutemov 
1850800d8c63SKirill A. Shutemov 			for (i = 0; i < (1 << compound_order(head)); i++) {
1851800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
1852800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
1853800d8c63SKirill A. Shutemov 			}
1854800d8c63SKirill A. Shutemov 			SetPageUptodate(head);
1855ec9516fbSHugh Dickins 		}
18561da177e4SLinus Torvalds 	}
1857bde05d1cSHugh Dickins 
185854af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
185975edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
186009cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1861267a4c76SHugh Dickins 		if (alloced) {
1862267a4c76SHugh Dickins 			ClearPageDirty(page);
1863267a4c76SHugh Dickins 			delete_from_page_cache(page);
18644595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1865267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
18664595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1867267a4c76SHugh Dickins 		}
186854af6042SHugh Dickins 		error = -EINVAL;
1869267a4c76SHugh Dickins 		goto unlock;
1870ff36b801SShaohua Li 	}
1871800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
187254af6042SHugh Dickins 	return 0;
1873d00806b1SNick Piggin 
1874d0217ac0SNick Piggin 	/*
187554af6042SHugh Dickins 	 * Error recovery.
18761da177e4SLinus Torvalds 	 */
187754af6042SHugh Dickins unacct:
1878800d8c63SKirill A. Shutemov 	if (sbinfo->max_blocks)
1879800d8c63SKirill A. Shutemov 		percpu_counter_sub(&sbinfo->used_blocks,
1880800d8c63SKirill A. Shutemov 				1 << compound_order(page));
1881800d8c63SKirill A. Shutemov 	shmem_unacct_blocks(info->flags, 1 << compound_order(page));
1882800d8c63SKirill A. Shutemov 
1883800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
1884800d8c63SKirill A. Shutemov 		unlock_page(page);
1885800d8c63SKirill A. Shutemov 		put_page(page);
1886800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1887800d8c63SKirill A. Shutemov 	}
188854af6042SHugh Dickins failed:
1889267a4c76SHugh Dickins 	if (swap.val && !shmem_confirm_swap(mapping, index, swap))
189054af6042SHugh Dickins 		error = -EEXIST;
1891d1899228SHugh Dickins unlock:
189227ab7006SHugh Dickins 	if (page) {
189354af6042SHugh Dickins 		unlock_page(page);
189409cbfeafSKirill A. Shutemov 		put_page(page);
189554af6042SHugh Dickins 	}
189654af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
18974595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
189854af6042SHugh Dickins 		shmem_recalc_inode(inode);
18994595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
19001da177e4SLinus Torvalds 		goto repeat;
1901d8dc74f2SAdrian Bunk 	}
1902d1899228SHugh Dickins 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
190354af6042SHugh Dickins 		goto repeat;
190454af6042SHugh Dickins 	return error;
19051da177e4SLinus Torvalds }
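/*
 * Editor's usage sketch: a typical in-kernel read of a shmem page, as the
 * shmem_read_mapping_page_gfp() helper elsewhere in this file does it,
 * looks roughly like:
 *
 *	struct page *page;
 *	int error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
 *				      gfp, NULL, NULL, NULL);
 *	if (!error)
 *		unlock_page(page);	returned locked on success
 */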
19061da177e4SLinus Torvalds 
190710d20bd2SLinus Torvalds /*
190810d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
190910d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
191010d20bd2SLinus Torvalds  * target.
191110d20bd2SLinus Torvalds  */
1912ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
191310d20bd2SLinus Torvalds {
191410d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
19152055da97SIngo Molnar 	list_del_init(&wait->entry);
191610d20bd2SLinus Torvalds 	return ret;
191710d20bd2SLinus Torvalds }
191810d20bd2SLinus Torvalds 
191911bac800SDave Jiang static int shmem_fault(struct vm_fault *vmf)
19201da177e4SLinus Torvalds {
192111bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1922496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
19239e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1924657e3038SKirill A. Shutemov 	enum sgp_type sgp;
19251da177e4SLinus Torvalds 	int error;
192668da9f05SHugh Dickins 	int ret = VM_FAULT_LOCKED;
19271da177e4SLinus Torvalds 
1928f00cdc6dSHugh Dickins 	/*
1929f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
1930f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
1931f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
19328e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
19338e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
19348e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
19358e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
19368e205f77SHugh Dickins 	 *
19378e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
19388e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
19398e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
19408e205f77SHugh Dickins 	 *
19418e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
19428e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
19438e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
1944f00cdc6dSHugh Dickins 	 */
1945f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
1946f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
1947f00cdc6dSHugh Dickins 
1948f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
1949f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
19508e205f77SHugh Dickins 		if (shmem_falloc &&
19518e205f77SHugh Dickins 		    shmem_falloc->waitq &&
19528e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
19538e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
19548e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
195510d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
19568e205f77SHugh Dickins 
19578e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
1958f00cdc6dSHugh Dickins 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1959f00cdc6dSHugh Dickins 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
19608e205f77SHugh Dickins 				/* It's polite to up mmap_sem if we can */
1961f00cdc6dSHugh Dickins 				up_read(&vma->vm_mm->mmap_sem);
19628e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
1963f00cdc6dSHugh Dickins 			}
19648e205f77SHugh Dickins 
19658e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
19668e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
19678e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
19688e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
19698e205f77SHugh Dickins 			schedule();
19708e205f77SHugh Dickins 
19718e205f77SHugh Dickins 			/*
19728e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
19738e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
19748e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
19758e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
19768e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
19778e205f77SHugh Dickins 			 */
19788e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
19798e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
19808e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
19818e205f77SHugh Dickins 			return ret;
1982f00cdc6dSHugh Dickins 		}
19838e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
1984f00cdc6dSHugh Dickins 	}
1985f00cdc6dSHugh Dickins 
1986657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
198718600332SMichal Hocko 
198818600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
198918600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
1990657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
199118600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
199218600332SMichal Hocko 		sgp = SGP_HUGE;
1993657e3038SKirill A. Shutemov 
1994657e3038SKirill A. Shutemov 	error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
1995cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
19961da177e4SLinus Torvalds 	if (error)
19971da177e4SLinus Torvalds 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
199868da9f05SHugh Dickins 	return ret;
19991da177e4SLinus Torvalds }
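/*
 * For reference (editor's sketch): the other half of the handshake above
 * lives in shmem_fallocate()'s hole-punch path, roughly:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 *	shmem_falloc.waitq = &shmem_falloc_waitq;
 *	spin_lock(&inode->i_lock);
 *	inode->i_private = &shmem_falloc;	publish before punching
 *	spin_unlock(&inode->i_lock);
 *	...punch the hole...
 *	spin_lock(&inode->i_lock);
 *	inode->i_private = NULL;
 *	wake_up_all(&shmem_falloc_waitq);	faulters recheck and proceed
 *	spin_unlock(&inode->i_lock);
 */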
20001da177e4SLinus Torvalds 
2001c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2002c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2003c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2004c01d5b30SHugh Dickins {
2005c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2006c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2007c01d5b30SHugh Dickins 	unsigned long addr;
2008c01d5b30SHugh Dickins 	unsigned long offset;
2009c01d5b30SHugh Dickins 	unsigned long inflated_len;
2010c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2011c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2012c01d5b30SHugh Dickins 
2013c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2014c01d5b30SHugh Dickins 		return -ENOMEM;
2015c01d5b30SHugh Dickins 
2016c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2017c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2018c01d5b30SHugh Dickins 
2019e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2020c01d5b30SHugh Dickins 		return addr;
2021c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2022c01d5b30SHugh Dickins 		return addr;
2023c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2024c01d5b30SHugh Dickins 		return addr;
2025c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2026c01d5b30SHugh Dickins 		return addr;
2027c01d5b30SHugh Dickins 
2028c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2029c01d5b30SHugh Dickins 		return addr;
2030c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2031c01d5b30SHugh Dickins 		return addr;
2032c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2033c01d5b30SHugh Dickins 		return addr;
2034c01d5b30SHugh Dickins 	/*
2035c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2036c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2037c01d5b30SHugh Dickins 	 * But if caller specified an address hint, respect that as before.
2038c01d5b30SHugh Dickins 	 */
2039c01d5b30SHugh Dickins 	if (uaddr)
2040c01d5b30SHugh Dickins 		return addr;
2041c01d5b30SHugh Dickins 
2042c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2043c01d5b30SHugh Dickins 		struct super_block *sb;
2044c01d5b30SHugh Dickins 
2045c01d5b30SHugh Dickins 		if (file) {
2046c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2047c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2048c01d5b30SHugh Dickins 		} else {
2049c01d5b30SHugh Dickins 			/*
2050c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2051c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2052c01d5b30SHugh Dickins 			 */
2053c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2054c01d5b30SHugh Dickins 				return addr;
2055c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2056c01d5b30SHugh Dickins 		}
20573089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2058c01d5b30SHugh Dickins 			return addr;
2059c01d5b30SHugh Dickins 	}
2060c01d5b30SHugh Dickins 
2061c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2062c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2063c01d5b30SHugh Dickins 		return addr;
2064c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2065c01d5b30SHugh Dickins 		return addr;
2066c01d5b30SHugh Dickins 
2067c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2068c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2069c01d5b30SHugh Dickins 		return addr;
2070c01d5b30SHugh Dickins 	if (inflated_len < len)
2071c01d5b30SHugh Dickins 		return addr;
2072c01d5b30SHugh Dickins 
2073c01d5b30SHugh Dickins 	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2074c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2075c01d5b30SHugh Dickins 		return addr;
2076c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2077c01d5b30SHugh Dickins 		return addr;
2078c01d5b30SHugh Dickins 
2079c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2080c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2081c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2082c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2083c01d5b30SHugh Dickins 
2084c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2085c01d5b30SHugh Dickins 		return addr;
2086c01d5b30SHugh Dickins 	return inflated_addr;
2087c01d5b30SHugh Dickins }
2088c01d5b30SHugh Dickins 
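/*
 * Illustrative arithmetic for the inflation above (editorial sketch,
 * assuming 4K pages and a 2M HPAGE_PMD_SIZE): for a 4M MAP_SHARED
 * mapping at pgoff 0, offset is 0.  If the first get_area() pass
 * returns an address that is not 2M-aligned, we retry unhinted with
 * inflated_len = 4M + 2M - 4K; any window of that size must contain a
 * 2M-aligned start leaving room for the full 4M, so inflated_addr is
 * simply rounded up to the next address congruent to offset modulo
 * HPAGE_PMD_SIZE, letting the mapping be served by PMD-sized pages.
 */
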
20891da177e4SLinus Torvalds #ifdef CONFIG_NUMA
209041ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
20911da177e4SLinus Torvalds {
2092496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
209341ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
20941da177e4SLinus Torvalds }
20951da177e4SLinus Torvalds 
2096d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2097d8dc74f2SAdrian Bunk 					  unsigned long addr)
20981da177e4SLinus Torvalds {
2099496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
210041ffe5d5SHugh Dickins 	pgoff_t index;
21011da177e4SLinus Torvalds 
210241ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
210341ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
21041da177e4SLinus Torvalds }
21051da177e4SLinus Torvalds #endif
21061da177e4SLinus Torvalds 
21071da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
21081da177e4SLinus Torvalds {
2109496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
21101da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
21111da177e4SLinus Torvalds 	int retval = -ENOMEM;
21121da177e4SLinus Torvalds 
21134595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
21141da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
21151da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
21161da177e4SLinus Torvalds 			goto out_nomem;
21171da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
211889e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
21191da177e4SLinus Torvalds 	}
21201da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
21211da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
21221da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
212389e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
21241da177e4SLinus Torvalds 	}
21251da177e4SLinus Torvalds 	retval = 0;
212689e004eaSLee Schermerhorn 
21271da177e4SLinus Torvalds out_nomem:
21284595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
21291da177e4SLinus Torvalds 	return retval;
21301da177e4SLinus Torvalds }
21311da177e4SLinus Torvalds 
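/*
 * Illustrative userspace path to shmem_lock() (editorial sketch; the
 * SysV IPC layer in ipc/shm.c is the caller, error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	marks the pages unevictable,
 *					charged against RLIMIT_MEMLOCK
 *	shmctl(id, SHM_UNLOCK, NULL);
 */
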
21329b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
21331da177e4SLinus Torvalds {
21341da177e4SLinus Torvalds 	file_accessed(file);
21351da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2136e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2137f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2138f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2139f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2140f3f0e1d2SKirill A. Shutemov 	}
21411da177e4SLinus Torvalds 	return 0;
21421da177e4SLinus Torvalds }
21431da177e4SLinus Torvalds 
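/*
 * Note on the test above: (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK
 * rounds vm_start up to a PMD boundary and vm_end & HPAGE_PMD_MASK
 * rounds vm_end down, so khugepaged is entered only when the vma
 * contains at least one whole PMD-aligned, PMD-sized range: with 2M
 * PMDs, a vma of [1M, 3M) does not qualify (2M < 2M is false), while
 * [1M, 5M) does, via its aligned subrange [2M, 4M).
 */
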
2144454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
214509208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
21461da177e4SLinus Torvalds {
21471da177e4SLinus Torvalds 	struct inode *inode;
21481da177e4SLinus Torvalds 	struct shmem_inode_info *info;
21491da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
21501da177e4SLinus Torvalds 
21515b04c689SPavel Emelyanov 	if (shmem_reserve_inode(sb))
21521da177e4SLinus Torvalds 		return NULL;
21531da177e4SLinus Torvalds 
21541da177e4SLinus Torvalds 	inode = new_inode(sb);
21551da177e4SLinus Torvalds 	if (inode) {
215685fe4025SChristoph Hellwig 		inode->i_ino = get_next_ino();
2157454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
21581da177e4SLinus Torvalds 		inode->i_blocks = 0;
2159078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
216091828a40SDavid M. Grimes 		inode->i_generation = get_seconds();
21611da177e4SLinus Torvalds 		info = SHMEM_I(inode);
21621da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
21631da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
216440e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
21650b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2166779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
21671da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
216838f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
216972c04902SAl Viro 		cache_no_acl(inode);
21701da177e4SLinus Torvalds 
21711da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
21721da177e4SLinus Torvalds 		default:
217339f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
21741da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
21751da177e4SLinus Torvalds 			break;
21761da177e4SLinus Torvalds 		case S_IFREG:
217714fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
21781da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
21791da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
218071fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
218171fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
21821da177e4SLinus Torvalds 			break;
21831da177e4SLinus Torvalds 		case S_IFDIR:
2184d8c76e6fSDave Hansen 			inc_nlink(inode);
21851da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
21861da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
21871da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
21881da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
21891da177e4SLinus Torvalds 			break;
21901da177e4SLinus Torvalds 		case S_IFLNK:
21911da177e4SLinus Torvalds 			/*
21921da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
21931da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
21941da177e4SLinus Torvalds 			 */
219571fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
21961da177e4SLinus Torvalds 			break;
21971da177e4SLinus Torvalds 		}
21985b04c689SPavel Emelyanov 	} else
21995b04c689SPavel Emelyanov 		shmem_free_inode(sb);
22001da177e4SLinus Torvalds 	return inode;
22011da177e4SLinus Torvalds }
22021da177e4SLinus Torvalds 
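/*
 * Layout note for the memset() in shmem_get_inode() (editorial, relying
 * on shmem_fs.h as it stands): struct shmem_inode_info embeds its
 * struct inode as the final member, and SHMEM_I() is a container_of(),
 * so (char *)inode - (char *)info is exactly the size of the shmem-
 * private head of the structure; only those private fields are zeroed,
 * leaving the inode that new_inode() initialized untouched.
 */
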
22030cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping)
22040cd6144aSJohannes Weiner {
2205f8005451SHugh Dickins 	return mapping->a_ops == &shmem_aops;
22060cd6144aSJohannes Weiner }
22070cd6144aSJohannes Weiner 
22084c27fe4cSMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
22094c27fe4cSMike Rapoport 			   pmd_t *dst_pmd,
22104c27fe4cSMike Rapoport 			   struct vm_area_struct *dst_vma,
22114c27fe4cSMike Rapoport 			   unsigned long dst_addr,
22124c27fe4cSMike Rapoport 			   unsigned long src_addr,
22134c27fe4cSMike Rapoport 			   struct page **pagep)
22144c27fe4cSMike Rapoport {
22154c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
22164c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
22174c27fe4cSMike Rapoport 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
22184c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
22194c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
22204c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
22214c27fe4cSMike Rapoport 	struct mem_cgroup *memcg;
22224c27fe4cSMike Rapoport 	spinlock_t *ptl;
22234c27fe4cSMike Rapoport 	void *page_kaddr;
22244c27fe4cSMike Rapoport 	struct page *page;
22254c27fe4cSMike Rapoport 	pte_t _dst_pte, *dst_pte;
22264c27fe4cSMike Rapoport 	int ret;
22274c27fe4cSMike Rapoport 
22284c27fe4cSMike Rapoport 	ret = -ENOMEM;
22294c27fe4cSMike Rapoport 	if (shmem_acct_block(info->flags, 1))
22304c27fe4cSMike Rapoport 		goto out;
22314c27fe4cSMike Rapoport 	if (sbinfo->max_blocks) {
22324c27fe4cSMike Rapoport 		if (percpu_counter_compare(&sbinfo->used_blocks,
22334c27fe4cSMike Rapoport 					   sbinfo->max_blocks) >= 0)
22344c27fe4cSMike Rapoport 			goto out_unacct_blocks;
22354c27fe4cSMike Rapoport 		percpu_counter_inc(&sbinfo->used_blocks);
22364c27fe4cSMike Rapoport 	}
22374c27fe4cSMike Rapoport 
2238cb658a45SAndrea Arcangeli 	if (!*pagep) {
22394c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
22404c27fe4cSMike Rapoport 		if (!page)
22414c27fe4cSMike Rapoport 			goto out_dec_used_blocks;
22424c27fe4cSMike Rapoport 
22434c27fe4cSMike Rapoport 		page_kaddr = kmap_atomic(page);
22444c27fe4cSMike Rapoport 		ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
22454c27fe4cSMike Rapoport 				     PAGE_SIZE);
22464c27fe4cSMike Rapoport 		kunmap_atomic(page_kaddr);
22474c27fe4cSMike Rapoport 
22484c27fe4cSMike Rapoport 	/* fall back to copy_from_user outside mmap_sem */
22494c27fe4cSMike Rapoport 		if (unlikely(ret)) {
22504c27fe4cSMike Rapoport 			*pagep = page;
2251cb658a45SAndrea Arcangeli 			if (sbinfo->max_blocks)
2252cb658a45SAndrea Arcangeli 				percpu_counter_add(&sbinfo->used_blocks, -1);
2253cb658a45SAndrea Arcangeli 			shmem_unacct_blocks(info->flags, 1);
22544c27fe4cSMike Rapoport 			/* don't free the page */
22554c27fe4cSMike Rapoport 			return -EFAULT;
22564c27fe4cSMike Rapoport 		}
22574c27fe4cSMike Rapoport 	} else {
22584c27fe4cSMike Rapoport 		page = *pagep;
22594c27fe4cSMike Rapoport 		*pagep = NULL;
22604c27fe4cSMike Rapoport 	}
22614c27fe4cSMike Rapoport 
22629cc90c66SAndrea Arcangeli 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
22639cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
22649cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2265a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
22669cc90c66SAndrea Arcangeli 
22674c27fe4cSMike Rapoport 	ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
22684c27fe4cSMike Rapoport 	if (ret)
22694c27fe4cSMike Rapoport 		goto out_release;
22704c27fe4cSMike Rapoport 
22714c27fe4cSMike Rapoport 	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
22724c27fe4cSMike Rapoport 	if (!ret) {
22734c27fe4cSMike Rapoport 		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
22744c27fe4cSMike Rapoport 		radix_tree_preload_end();
22754c27fe4cSMike Rapoport 	}
22764c27fe4cSMike Rapoport 	if (ret)
22774c27fe4cSMike Rapoport 		goto out_release_uncharge;
22784c27fe4cSMike Rapoport 
22794c27fe4cSMike Rapoport 	mem_cgroup_commit_charge(page, memcg, false, false);
22804c27fe4cSMike Rapoport 
22814c27fe4cSMike Rapoport 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
22824c27fe4cSMike Rapoport 	if (dst_vma->vm_flags & VM_WRITE)
22834c27fe4cSMike Rapoport 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
22844c27fe4cSMike Rapoport 
22854c27fe4cSMike Rapoport 	ret = -EEXIST;
22864c27fe4cSMike Rapoport 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
22874c27fe4cSMike Rapoport 	if (!pte_none(*dst_pte))
22884c27fe4cSMike Rapoport 		goto out_release_uncharge_unlock;
22894c27fe4cSMike Rapoport 
22904c27fe4cSMike Rapoport 	lru_cache_add_anon(page);
22914c27fe4cSMike Rapoport 
22924c27fe4cSMike Rapoport 	spin_lock(&info->lock);
22934c27fe4cSMike Rapoport 	info->alloced++;
22944c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
22954c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
22964c27fe4cSMike Rapoport 	spin_unlock(&info->lock);
22974c27fe4cSMike Rapoport 
22984c27fe4cSMike Rapoport 	inc_mm_counter(dst_mm, mm_counter_file(page));
22994c27fe4cSMike Rapoport 	page_add_file_rmap(page, false);
23004c27fe4cSMike Rapoport 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
23014c27fe4cSMike Rapoport 
23024c27fe4cSMike Rapoport 	/* No need to invalidate - it was non-present before */
23034c27fe4cSMike Rapoport 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
23044c27fe4cSMike Rapoport 	unlock_page(page);
23054c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
23064c27fe4cSMike Rapoport 	ret = 0;
23074c27fe4cSMike Rapoport out:
23084c27fe4cSMike Rapoport 	return ret;
23094c27fe4cSMike Rapoport out_release_uncharge_unlock:
23104c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
23114c27fe4cSMike Rapoport out_release_uncharge:
23124c27fe4cSMike Rapoport 	mem_cgroup_cancel_charge(page, memcg, false);
23134c27fe4cSMike Rapoport out_release:
23149cc90c66SAndrea Arcangeli 	unlock_page(page);
23154c27fe4cSMike Rapoport 	put_page(page);
23164c27fe4cSMike Rapoport out_dec_used_blocks:
23174c27fe4cSMike Rapoport 	if (sbinfo->max_blocks)
23184c27fe4cSMike Rapoport 		percpu_counter_add(&sbinfo->used_blocks, -1);
23194c27fe4cSMike Rapoport out_unacct_blocks:
23204c27fe4cSMike Rapoport 	shmem_unacct_blocks(info->flags, 1);
23214c27fe4cSMike Rapoport 	goto out;
23224c27fe4cSMike Rapoport }
23234c27fe4cSMike Rapoport 
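/*
 * Illustrative userspace trigger for shmem_mcopy_atomic_pte()
 * (editorial sketch): it backs UFFDIO_COPY on a shmem VMA that was
 * registered with userfaultfd; uffd, fault_addr and src_buf are
 * assumed to be set up elsewhere, error handling omitted:
 *
 *	struct uffdio_copy copy = {
 *		.dst	= (unsigned long)fault_addr & ~(PAGE_SIZE - 1),
 *		.src	= (unsigned long)src_buf,
 *		.len	= PAGE_SIZE,
 *		.mode	= 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);   see <linux/userfaultfd.h>
 */
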
23241da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
232592e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
232669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
23271da177e4SLinus Torvalds 
23286d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
23296d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
23306d9d88d0SJarkko Sakkinen #else
23316d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
23326d9d88d0SJarkko Sakkinen #endif
23336d9d88d0SJarkko Sakkinen 
23341da177e4SLinus Torvalds static int
2335800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2336800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2337800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
23381da177e4SLinus Torvalds {
2339800d15a5SNick Piggin 	struct inode *inode = mapping->host;
234040e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
234109cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
234240e041a2SDavid Herrmann 
234340e041a2SDavid Herrmann 	/* i_mutex is held by caller */
23443f472cc9SSteven Rostedt (VMware) 	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
234540e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE)
234640e041a2SDavid Herrmann 			return -EPERM;
234740e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
234840e041a2SDavid Herrmann 			return -EPERM;
234940e041a2SDavid Herrmann 	}
235040e041a2SDavid Herrmann 
23519e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2352800d15a5SNick Piggin }
2353800d15a5SNick Piggin 
2354800d15a5SNick Piggin static int
2355800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2356800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2357800d15a5SNick Piggin 			struct page *page, void *fsdata)
2358800d15a5SNick Piggin {
2359800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2360800d15a5SNick Piggin 
2361800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2362800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2363800d15a5SNick Piggin 
2364ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2365800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2366800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2367800d8c63SKirill A. Shutemov 			int i;
2368800d8c63SKirill A. Shutemov 
2369800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2370800d8c63SKirill A. Shutemov 				if (head + i == page)
2371800d8c63SKirill A. Shutemov 					continue;
2372800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2373800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2374800d8c63SKirill A. Shutemov 			}
2375800d8c63SKirill A. Shutemov 		}
237609cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
237709cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2378ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
237909cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2380ec9516fbSHugh Dickins 		}
2381800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2382ec9516fbSHugh Dickins 	}
2383d3602444SHugh Dickins 	set_page_dirty(page);
23846746aff7SWu Fengguang 	unlock_page(page);
238509cbfeafSKirill A. Shutemov 	put_page(page);
2386d3602444SHugh Dickins 
2387800d15a5SNick Piggin 	return copied;
23881da177e4SLinus Torvalds }
23891da177e4SLinus Torvalds 
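/*
 * Context for the pair above (general VFS contract, not shmem-specific):
 * generic_perform_write() calls ->write_begin() to obtain a locked
 * page, copies the user data into it, then calls ->write_end(); the
 * shmem implementation additionally zero-fills the never-written tail
 * of a !Uptodate page (and the sibling subpages of a huge page) before
 * marking it uptodate and dirty.
 */
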
23902ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
23911da177e4SLinus Torvalds {
23926e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
23936e58e79dSAl Viro 	struct inode *inode = file_inode(file);
23941da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
239541ffe5d5SHugh Dickins 	pgoff_t index;
239641ffe5d5SHugh Dickins 	unsigned long offset;
2397a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2398f7c1d074SGeert Uytterhoeven 	int error = 0;
2399cb66a7a1SAl Viro 	ssize_t retval = 0;
24006e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2401a0ee5ec5SHugh Dickins 
2402a0ee5ec5SHugh Dickins 	/*
2403a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2404a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2405a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2406a0ee5ec5SHugh Dickins 	 */
2407777eda2cSAl Viro 	if (!iter_is_iovec(to))
240875edd345SHugh Dickins 		sgp = SGP_CACHE;
24091da177e4SLinus Torvalds 
241009cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
241109cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
24121da177e4SLinus Torvalds 
24131da177e4SLinus Torvalds 	for (;;) {
24141da177e4SLinus Torvalds 		struct page *page = NULL;
241541ffe5d5SHugh Dickins 		pgoff_t end_index;
241641ffe5d5SHugh Dickins 		unsigned long nr, ret;
24171da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
24181da177e4SLinus Torvalds 
241909cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
24201da177e4SLinus Torvalds 		if (index > end_index)
24211da177e4SLinus Torvalds 			break;
24221da177e4SLinus Torvalds 		if (index == end_index) {
242309cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
24241da177e4SLinus Torvalds 			if (nr <= offset)
24251da177e4SLinus Torvalds 				break;
24261da177e4SLinus Torvalds 		}
24271da177e4SLinus Torvalds 
24289e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
24296e58e79dSAl Viro 		if (error) {
24306e58e79dSAl Viro 			if (error == -EINVAL)
24316e58e79dSAl Viro 				error = 0;
24321da177e4SLinus Torvalds 			break;
24331da177e4SLinus Torvalds 		}
243475edd345SHugh Dickins 		if (page) {
243575edd345SHugh Dickins 			if (sgp == SGP_CACHE)
243675edd345SHugh Dickins 				set_page_dirty(page);
2437d3602444SHugh Dickins 			unlock_page(page);
243875edd345SHugh Dickins 		}
24391da177e4SLinus Torvalds 
24401da177e4SLinus Torvalds 		/*
24411da177e4SLinus Torvalds 		 * We must evaluate after, since reads (unlike writes)
24421b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate
24431da177e4SLinus Torvalds 		 */
244409cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
24451da177e4SLinus Torvalds 		i_size = i_size_read(inode);
244609cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
24471da177e4SLinus Torvalds 		if (index == end_index) {
244809cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
24491da177e4SLinus Torvalds 			if (nr <= offset) {
24501da177e4SLinus Torvalds 				if (page)
245109cbfeafSKirill A. Shutemov 					put_page(page);
24521da177e4SLinus Torvalds 				break;
24531da177e4SLinus Torvalds 			}
24541da177e4SLinus Torvalds 		}
24551da177e4SLinus Torvalds 		nr -= offset;
24561da177e4SLinus Torvalds 
24571da177e4SLinus Torvalds 		if (page) {
24581da177e4SLinus Torvalds 			/*
24591da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
24601da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
24611da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
24621da177e4SLinus Torvalds 			 */
24631da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
24641da177e4SLinus Torvalds 				flush_dcache_page(page);
24651da177e4SLinus Torvalds 			/*
24661da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
24671da177e4SLinus Torvalds 			 */
24681da177e4SLinus Torvalds 			if (!offset)
24691da177e4SLinus Torvalds 				mark_page_accessed(page);
2470b5810039SNick Piggin 		} else {
24711da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
247209cbfeafSKirill A. Shutemov 			get_page(page);
2473b5810039SNick Piggin 		}
24741da177e4SLinus Torvalds 
24751da177e4SLinus Torvalds 		/*
24761da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
24771da177e4SLinus Torvalds 		 * now we can copy it to user space...
24781da177e4SLinus Torvalds 		 */
24792ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
24806e58e79dSAl Viro 		retval += ret;
24811da177e4SLinus Torvalds 		offset += ret;
248209cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
248309cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
24841da177e4SLinus Torvalds 
248509cbfeafSKirill A. Shutemov 		put_page(page);
24862ba5bbedSAl Viro 		if (!iov_iter_count(to))
24871da177e4SLinus Torvalds 			break;
24886e58e79dSAl Viro 		if (ret < nr) {
24896e58e79dSAl Viro 			error = -EFAULT;
24906e58e79dSAl Viro 			break;
24916e58e79dSAl Viro 		}
24921da177e4SLinus Torvalds 		cond_resched();
24931da177e4SLinus Torvalds 	}
24941da177e4SLinus Torvalds 
249509cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
24966e58e79dSAl Viro 	file_accessed(file);
24976e58e79dSAl Viro 	return retval ? retval : error;
24981da177e4SLinus Torvalds }
24991da177e4SLinus Torvalds 
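/*
 * Illustrative consequence of the ZERO_PAGE() branch above (editorial
 * sketch): reading a hole of a sparse tmpfs file copies from the
 * shared zero page and instantiates nothing:
 *
 *	ftruncate(fd, 1 << 20);		sparse 1M file, no pages yet
 *	read(fd, buf, 4096);		returns 4096 zero bytes without
 *					allocating a page
 *
 * Only the splice/stacking path (SGP_CACHE) must allocate and dirty
 * real pages, as the comment at the top of the function explains.
 */
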
2500220f2ac9SHugh Dickins /*
2501220f2ac9SHugh Dickins  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
2502220f2ac9SHugh Dickins  */
2503220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2504965c8e59SAndrew Morton 				    pgoff_t index, pgoff_t end, int whence)
2505220f2ac9SHugh Dickins {
2506220f2ac9SHugh Dickins 	struct page *page;
2507220f2ac9SHugh Dickins 	struct pagevec pvec;
2508220f2ac9SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
2509220f2ac9SHugh Dickins 	bool done = false;
2510220f2ac9SHugh Dickins 	int i;
2511220f2ac9SHugh Dickins 
2512220f2ac9SHugh Dickins 	pagevec_init(&pvec, 0);
2513220f2ac9SHugh Dickins 	pvec.nr = 1;		/* start small: we may be there already */
2514220f2ac9SHugh Dickins 	while (!done) {
25150cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
2516220f2ac9SHugh Dickins 					pvec.nr, pvec.pages, indices);
2517220f2ac9SHugh Dickins 		if (!pvec.nr) {
2518965c8e59SAndrew Morton 			if (whence == SEEK_DATA)
2519220f2ac9SHugh Dickins 				index = end;
2520220f2ac9SHugh Dickins 			break;
2521220f2ac9SHugh Dickins 		}
2522220f2ac9SHugh Dickins 		for (i = 0; i < pvec.nr; i++, index++) {
2523220f2ac9SHugh Dickins 			if (index < indices[i]) {
2524965c8e59SAndrew Morton 				if (whence == SEEK_HOLE) {
2525220f2ac9SHugh Dickins 					done = true;
2526220f2ac9SHugh Dickins 					break;
2527220f2ac9SHugh Dickins 				}
2528220f2ac9SHugh Dickins 				index = indices[i];
2529220f2ac9SHugh Dickins 			}
2530220f2ac9SHugh Dickins 			page = pvec.pages[i];
2531220f2ac9SHugh Dickins 			if (page && !radix_tree_exceptional_entry(page)) {
2532220f2ac9SHugh Dickins 				if (!PageUptodate(page))
2533220f2ac9SHugh Dickins 					page = NULL;
2534220f2ac9SHugh Dickins 			}
2535220f2ac9SHugh Dickins 			if (index >= end ||
2536965c8e59SAndrew Morton 			    (page && whence == SEEK_DATA) ||
2537965c8e59SAndrew Morton 			    (!page && whence == SEEK_HOLE)) {
2538220f2ac9SHugh Dickins 				done = true;
2539220f2ac9SHugh Dickins 				break;
2540220f2ac9SHugh Dickins 			}
2541220f2ac9SHugh Dickins 		}
25420cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
2543220f2ac9SHugh Dickins 		pagevec_release(&pvec);
2544220f2ac9SHugh Dickins 		pvec.nr = PAGEVEC_SIZE;
2545220f2ac9SHugh Dickins 		cond_resched();
2546220f2ac9SHugh Dickins 	}
2547220f2ac9SHugh Dickins 	return index;
2548220f2ac9SHugh Dickins }
2549220f2ac9SHugh Dickins 
2550965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2551220f2ac9SHugh Dickins {
2552220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2553220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2554220f2ac9SHugh Dickins 	pgoff_t start, end;
2555220f2ac9SHugh Dickins 	loff_t new_offset;
2556220f2ac9SHugh Dickins 
2557965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2558965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2559220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
25605955102cSAl Viro 	inode_lock(inode);
2561220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
2562220f2ac9SHugh Dickins 
2563220f2ac9SHugh Dickins 	if (offset < 0)
2564220f2ac9SHugh Dickins 		offset = -EINVAL;
2565220f2ac9SHugh Dickins 	else if (offset >= inode->i_size)
2566220f2ac9SHugh Dickins 		offset = -ENXIO;
2567220f2ac9SHugh Dickins 	else {
256809cbfeafSKirill A. Shutemov 		start = offset >> PAGE_SHIFT;
256909cbfeafSKirill A. Shutemov 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2570965c8e59SAndrew Morton 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
257109cbfeafSKirill A. Shutemov 		new_offset <<= PAGE_SHIFT;
2572220f2ac9SHugh Dickins 		if (new_offset > offset) {
2573220f2ac9SHugh Dickins 			if (new_offset < inode->i_size)
2574220f2ac9SHugh Dickins 				offset = new_offset;
2575965c8e59SAndrew Morton 			else if (whence == SEEK_DATA)
2576220f2ac9SHugh Dickins 				offset = -ENXIO;
2577220f2ac9SHugh Dickins 			else
2578220f2ac9SHugh Dickins 				offset = inode->i_size;
2579220f2ac9SHugh Dickins 		}
2580220f2ac9SHugh Dickins 	}
2581220f2ac9SHugh Dickins 
2582387aae6fSHugh Dickins 	if (offset >= 0)
258346a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
25845955102cSAl Viro 	inode_unlock(inode);
2585220f2ac9SHugh Dickins 	return offset;
2586220f2ac9SHugh Dickins }
2587220f2ac9SHugh Dickins 
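/*
 * Illustrative userspace use of the llseek support above (editorial
 * sketch): holes and data are resolved at page granularity by walking
 * the radix tree, and a fallocated !Uptodate page still counts as a
 * hole:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 first data page at or
 *						 after 0, or -1/ENXIO
 *	off_t hole = lseek(fd, data, SEEK_HOLE); end of that data run
 */
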
258805f65b5cSDavid Herrmann /*
258905f65b5cSDavid Herrmann  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
259005f65b5cSDavid Herrmann  * so reuse a tag which we firmly believe is never set or cleared on shmem.
259105f65b5cSDavid Herrmann  */
259205f65b5cSDavid Herrmann #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
259305f65b5cSDavid Herrmann #define LAST_SCAN               4       /* about 150ms max */
259405f65b5cSDavid Herrmann 
259505f65b5cSDavid Herrmann static void shmem_tag_pins(struct address_space *mapping)
259605f65b5cSDavid Herrmann {
259705f65b5cSDavid Herrmann 	struct radix_tree_iter iter;
259805f65b5cSDavid Herrmann 	void **slot;
259905f65b5cSDavid Herrmann 	pgoff_t start;
260005f65b5cSDavid Herrmann 	struct page *page;
260105f65b5cSDavid Herrmann 
260205f65b5cSDavid Herrmann 	lru_add_drain();
260305f65b5cSDavid Herrmann 	start = 0;
260405f65b5cSDavid Herrmann 	rcu_read_lock();
260505f65b5cSDavid Herrmann 
260605f65b5cSDavid Herrmann 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
260705f65b5cSDavid Herrmann 		page = radix_tree_deref_slot(slot);
260805f65b5cSDavid Herrmann 		if (!page || radix_tree_exception(page)) {
26092cf938aaSMatthew Wilcox 			if (radix_tree_deref_retry(page)) {
26102cf938aaSMatthew Wilcox 				slot = radix_tree_iter_retry(&iter);
26112cf938aaSMatthew Wilcox 				continue;
26122cf938aaSMatthew Wilcox 			}
261305f65b5cSDavid Herrmann 		} else if (page_count(page) - page_mapcount(page) > 1) {
261405f65b5cSDavid Herrmann 			spin_lock_irq(&mapping->tree_lock);
261505f65b5cSDavid Herrmann 			radix_tree_tag_set(&mapping->page_tree, iter.index,
261605f65b5cSDavid Herrmann 					   SHMEM_TAG_PINNED);
261705f65b5cSDavid Herrmann 			spin_unlock_irq(&mapping->tree_lock);
261805f65b5cSDavid Herrmann 		}
261905f65b5cSDavid Herrmann 
262005f65b5cSDavid Herrmann 		if (need_resched()) {
2621148deab2SMatthew Wilcox 			slot = radix_tree_iter_resume(slot, &iter);
262205f65b5cSDavid Herrmann 			cond_resched_rcu();
262305f65b5cSDavid Herrmann 		}
262405f65b5cSDavid Herrmann 	}
262505f65b5cSDavid Herrmann 	rcu_read_unlock();
262605f65b5cSDavid Herrmann }
262705f65b5cSDavid Herrmann 
262805f65b5cSDavid Herrmann /*
262905f65b5cSDavid Herrmann  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
263005f65b5cSDavid Herrmann  * via get_user_pages(), drivers might have some pending I/O without any active
263105f65b5cSDavid Herrmann  * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
263205f65b5cSDavid Herrmann  * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
263305f65b5cSDavid Herrmann  * and see whether they have an elevated ref-count. If so, we tag them and wait for
263405f65b5cSDavid Herrmann  * The caller must guarantee that no new user will acquire writable references
263505f65b5cSDavid Herrmann  * to those pages to avoid races.
263605f65b5cSDavid Herrmann  */
263740e041a2SDavid Herrmann static int shmem_wait_for_pins(struct address_space *mapping)
263840e041a2SDavid Herrmann {
263905f65b5cSDavid Herrmann 	struct radix_tree_iter iter;
264005f65b5cSDavid Herrmann 	void **slot;
264105f65b5cSDavid Herrmann 	pgoff_t start;
264205f65b5cSDavid Herrmann 	struct page *page;
264305f65b5cSDavid Herrmann 	int error, scan;
264405f65b5cSDavid Herrmann 
264505f65b5cSDavid Herrmann 	shmem_tag_pins(mapping);
264605f65b5cSDavid Herrmann 
264705f65b5cSDavid Herrmann 	error = 0;
264805f65b5cSDavid Herrmann 	for (scan = 0; scan <= LAST_SCAN; scan++) {
264905f65b5cSDavid Herrmann 		if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
265005f65b5cSDavid Herrmann 			break;
265105f65b5cSDavid Herrmann 
265205f65b5cSDavid Herrmann 		if (!scan)
265305f65b5cSDavid Herrmann 			lru_add_drain_all();
265405f65b5cSDavid Herrmann 		else if (schedule_timeout_killable((HZ << scan) / 200))
265505f65b5cSDavid Herrmann 			scan = LAST_SCAN;
265605f65b5cSDavid Herrmann 
265705f65b5cSDavid Herrmann 		start = 0;
265805f65b5cSDavid Herrmann 		rcu_read_lock();
265905f65b5cSDavid Herrmann 		radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
266005f65b5cSDavid Herrmann 					   start, SHMEM_TAG_PINNED) {
266105f65b5cSDavid Herrmann 
266205f65b5cSDavid Herrmann 			page = radix_tree_deref_slot(slot);
266305f65b5cSDavid Herrmann 			if (radix_tree_exception(page)) {
26642cf938aaSMatthew Wilcox 				if (radix_tree_deref_retry(page)) {
26652cf938aaSMatthew Wilcox 					slot = radix_tree_iter_retry(&iter);
26662cf938aaSMatthew Wilcox 					continue;
26672cf938aaSMatthew Wilcox 				}
266805f65b5cSDavid Herrmann 
266905f65b5cSDavid Herrmann 				page = NULL;
267005f65b5cSDavid Herrmann 			}
267105f65b5cSDavid Herrmann 
267205f65b5cSDavid Herrmann 			if (page &&
267305f65b5cSDavid Herrmann 			    page_count(page) - page_mapcount(page) != 1) {
267405f65b5cSDavid Herrmann 				if (scan < LAST_SCAN)
267505f65b5cSDavid Herrmann 					goto continue_resched;
267605f65b5cSDavid Herrmann 
267705f65b5cSDavid Herrmann 				/*
267805f65b5cSDavid Herrmann 				 * On the last scan, we clean up all those tags
267905f65b5cSDavid Herrmann 				 * we inserted; but make a note that we still
268005f65b5cSDavid Herrmann 				 * found pages pinned.
268105f65b5cSDavid Herrmann 				 */
268205f65b5cSDavid Herrmann 				error = -EBUSY;
268305f65b5cSDavid Herrmann 			}
268405f65b5cSDavid Herrmann 
268505f65b5cSDavid Herrmann 			spin_lock_irq(&mapping->tree_lock);
268605f65b5cSDavid Herrmann 			radix_tree_tag_clear(&mapping->page_tree,
268705f65b5cSDavid Herrmann 					     iter.index, SHMEM_TAG_PINNED);
268805f65b5cSDavid Herrmann 			spin_unlock_irq(&mapping->tree_lock);
268905f65b5cSDavid Herrmann continue_resched:
269005f65b5cSDavid Herrmann 			if (need_resched()) {
2691148deab2SMatthew Wilcox 				slot = radix_tree_iter_resume(slot, &iter);
269205f65b5cSDavid Herrmann 				cond_resched_rcu();
269305f65b5cSDavid Herrmann 			}
269405f65b5cSDavid Herrmann 		}
269505f65b5cSDavid Herrmann 		rcu_read_unlock();
269605f65b5cSDavid Herrmann 	}
269705f65b5cSDavid Herrmann 
269805f65b5cSDavid Herrmann 	return error;
269940e041a2SDavid Herrmann }
270040e041a2SDavid Herrmann 
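/*
 * Timing note for the loop above: schedule_timeout_killable((HZ << scan)
 * / 200) sleeps about 10ms, 20ms, 40ms and 80ms for scan = 1..LAST_SCAN,
 * roughly 150ms in total, which is where the "about 150ms max" estimate
 * beside LAST_SCAN comes from; pages still pinned after the final scan
 * leave their tags cleared but make shmem_wait_for_pins() return -EBUSY.
 */
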
270140e041a2SDavid Herrmann #define F_ALL_SEALS (F_SEAL_SEAL | \
270240e041a2SDavid Herrmann 		     F_SEAL_SHRINK | \
270340e041a2SDavid Herrmann 		     F_SEAL_GROW | \
270440e041a2SDavid Herrmann 		     F_SEAL_WRITE)
270540e041a2SDavid Herrmann 
270640e041a2SDavid Herrmann int shmem_add_seals(struct file *file, unsigned int seals)
270740e041a2SDavid Herrmann {
270840e041a2SDavid Herrmann 	struct inode *inode = file_inode(file);
270940e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
271040e041a2SDavid Herrmann 	int error;
271140e041a2SDavid Herrmann 
271240e041a2SDavid Herrmann 	/*
271340e041a2SDavid Herrmann 	 * SEALING
271440e041a2SDavid Herrmann 	 * Sealing allows multiple parties to share a shmem-file but restrict
271540e041a2SDavid Herrmann 	 * access to a specific subset of file operations. Seals can only be
271640e041a2SDavid Herrmann 	 * added, but never removed. This way, mutually untrusted parties can
271740e041a2SDavid Herrmann 	 * share common memory regions with a well-defined policy. A malicious
271840e041a2SDavid Herrmann 	 * peer can thus never perform unwanted operations on a shared object.
271940e041a2SDavid Herrmann 	 *
272040e041a2SDavid Herrmann 	 * Seals are only supported on special shmem-files and always affect
272140e041a2SDavid Herrmann 	 * the whole underlying inode. Once a seal is set, it may prevent some
272240e041a2SDavid Herrmann 	 * kinds of access to the file. Currently, the following seals are
272340e041a2SDavid Herrmann 	 * defined:
272440e041a2SDavid Herrmann 	 *   SEAL_SEAL: Prevent further seals from being set on this file
272540e041a2SDavid Herrmann 	 *   SEAL_SHRINK: Prevent the file from shrinking
272640e041a2SDavid Herrmann 	 *   SEAL_GROW: Prevent the file from growing
272740e041a2SDavid Herrmann 	 *   SEAL_WRITE: Prevent write access to the file
272840e041a2SDavid Herrmann 	 *
272940e041a2SDavid Herrmann 	 * As we don't require any trust relationship between two parties, we
273040e041a2SDavid Herrmann 	 * must prevent seals from being removed. Therefore, sealing a file
273140e041a2SDavid Herrmann 	 * only adds a given set of seals to the file, it never touches
273240e041a2SDavid Herrmann 	 * existing seals. Furthermore, the "setting seals"-operation can be
273340e041a2SDavid Herrmann 	 * sealed itself, which basically prevents any further seal from being
273440e041a2SDavid Herrmann 	 * added.
273540e041a2SDavid Herrmann 	 *
273640e041a2SDavid Herrmann 	 * Semantics of sealing are only defined on volatile files. Only
273740e041a2SDavid Herrmann 	 * anonymous shmem files support sealing. More importantly, seals are
273840e041a2SDavid Herrmann 	 * never written to disk. Therefore, there's no plan to support it on
273940e041a2SDavid Herrmann 	 * other file types.
274040e041a2SDavid Herrmann 	 */
274140e041a2SDavid Herrmann 
274240e041a2SDavid Herrmann 	if (file->f_op != &shmem_file_operations)
274340e041a2SDavid Herrmann 		return -EINVAL;
274440e041a2SDavid Herrmann 	if (!(file->f_mode & FMODE_WRITE))
274540e041a2SDavid Herrmann 		return -EPERM;
274640e041a2SDavid Herrmann 	if (seals & ~(unsigned int)F_ALL_SEALS)
274740e041a2SDavid Herrmann 		return -EINVAL;
274840e041a2SDavid Herrmann 
27495955102cSAl Viro 	inode_lock(inode);
275040e041a2SDavid Herrmann 
275140e041a2SDavid Herrmann 	if (info->seals & F_SEAL_SEAL) {
275240e041a2SDavid Herrmann 		error = -EPERM;
275340e041a2SDavid Herrmann 		goto unlock;
275440e041a2SDavid Herrmann 	}
275540e041a2SDavid Herrmann 
275640e041a2SDavid Herrmann 	if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
275740e041a2SDavid Herrmann 		error = mapping_deny_writable(file->f_mapping);
275840e041a2SDavid Herrmann 		if (error)
275940e041a2SDavid Herrmann 			goto unlock;
276040e041a2SDavid Herrmann 
276140e041a2SDavid Herrmann 		error = shmem_wait_for_pins(file->f_mapping);
276240e041a2SDavid Herrmann 		if (error) {
276340e041a2SDavid Herrmann 			mapping_allow_writable(file->f_mapping);
276440e041a2SDavid Herrmann 			goto unlock;
276540e041a2SDavid Herrmann 		}
276640e041a2SDavid Herrmann 	}
276740e041a2SDavid Herrmann 
276840e041a2SDavid Herrmann 	info->seals |= seals;
276940e041a2SDavid Herrmann 	error = 0;
277040e041a2SDavid Herrmann 
277140e041a2SDavid Herrmann unlock:
27725955102cSAl Viro 	inode_unlock(inode);
277340e041a2SDavid Herrmann 	return error;
277440e041a2SDavid Herrmann }
277540e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_add_seals);
277640e041a2SDavid Herrmann 
277740e041a2SDavid Herrmann int shmem_get_seals(struct file *file)
277840e041a2SDavid Herrmann {
277940e041a2SDavid Herrmann 	if (file->f_op != &shmem_file_operations)
278040e041a2SDavid Herrmann 		return -EINVAL;
278140e041a2SDavid Herrmann 
278240e041a2SDavid Herrmann 	return SHMEM_I(file_inode(file))->seals;
278340e041a2SDavid Herrmann }
278440e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_get_seals);
278540e041a2SDavid Herrmann 
278640e041a2SDavid Herrmann long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
278740e041a2SDavid Herrmann {
278840e041a2SDavid Herrmann 	long error;
278940e041a2SDavid Herrmann 
279040e041a2SDavid Herrmann 	switch (cmd) {
279140e041a2SDavid Herrmann 	case F_ADD_SEALS:
279240e041a2SDavid Herrmann 		/* disallow upper 32bit */
279340e041a2SDavid Herrmann 		if (arg > UINT_MAX)
279440e041a2SDavid Herrmann 			return -EINVAL;
279540e041a2SDavid Herrmann 
279640e041a2SDavid Herrmann 		error = shmem_add_seals(file, arg);
279740e041a2SDavid Herrmann 		break;
279840e041a2SDavid Herrmann 	case F_GET_SEALS:
279940e041a2SDavid Herrmann 		error = shmem_get_seals(file);
280040e041a2SDavid Herrmann 		break;
280140e041a2SDavid Herrmann 	default:
280240e041a2SDavid Herrmann 		error = -EINVAL;
280340e041a2SDavid Herrmann 		break;
280440e041a2SDavid Herrmann 	}
280540e041a2SDavid Herrmann 
280640e041a2SDavid Herrmann 	return error;
280740e041a2SDavid Herrmann }
280840e041a2SDavid Herrmann 
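/*
 * Illustrative userspace use of the sealing API above (editorial
 * sketch; assumes a libc exposing memfd_create(2), otherwise go
 * through syscall(__NR_memfd_create, ...)):
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	write(fd, data, len);
 *	fcntl(fd, F_ADD_SEALS,
 *	      F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
 *	int seals = fcntl(fd, F_GET_SEALS);
 *
 * Without MFD_ALLOW_SEALING the file starts with F_SEAL_SEAL already
 * set, so shmem_add_seals() fails with -EPERM, per the check above.
 */
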
280983e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
281083e4fa9cSHugh Dickins 							 loff_t len)
281183e4fa9cSHugh Dickins {
2812496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2813e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
281440e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
28151aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2816e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2817e2d12e22SHugh Dickins 	int error;
281883e4fa9cSHugh Dickins 
281913ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
282013ace4d0SHugh Dickins 		return -EOPNOTSUPP;
282113ace4d0SHugh Dickins 
28225955102cSAl Viro 	inode_lock(inode);
282383e4fa9cSHugh Dickins 
282483e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
282583e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
282683e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
282783e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
28288e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
282983e4fa9cSHugh Dickins 
283040e041a2SDavid Herrmann 		/* protected by i_mutex */
283140e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE) {
283240e041a2SDavid Herrmann 			error = -EPERM;
283340e041a2SDavid Herrmann 			goto out;
283440e041a2SDavid Herrmann 		}
283540e041a2SDavid Herrmann 
28368e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2837f00cdc6dSHugh Dickins 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2838f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2839f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2840f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2841f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2842f00cdc6dSHugh Dickins 
284383e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
284483e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
284583e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
284683e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
284783e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
28488e205f77SHugh Dickins 
28498e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
28508e205f77SHugh Dickins 		inode->i_private = NULL;
28518e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
28522055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
28538e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
285483e4fa9cSHugh Dickins 		error = 0;
28558e205f77SHugh Dickins 		goto out;
285683e4fa9cSHugh Dickins 	}
285783e4fa9cSHugh Dickins 
2858e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2859e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2860e2d12e22SHugh Dickins 	if (error)
2861e2d12e22SHugh Dickins 		goto out;
2862e2d12e22SHugh Dickins 
286340e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
286440e041a2SDavid Herrmann 		error = -EPERM;
286540e041a2SDavid Herrmann 		goto out;
286640e041a2SDavid Herrmann 	}
286740e041a2SDavid Herrmann 
286809cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
286909cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2870e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2871e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2872e2d12e22SHugh Dickins 		error = -ENOSPC;
2873e2d12e22SHugh Dickins 		goto out;
2874e2d12e22SHugh Dickins 	}
2875e2d12e22SHugh Dickins 
28768e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
28771aac1400SHugh Dickins 	shmem_falloc.start = start;
28781aac1400SHugh Dickins 	shmem_falloc.next  = start;
28791aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
28801aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
28811aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28821aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
28831aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
28841aac1400SHugh Dickins 
2885e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2886e2d12e22SHugh Dickins 		struct page *page;
2887e2d12e22SHugh Dickins 
2888e2d12e22SHugh Dickins 		/*
2889e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2890e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2891e2d12e22SHugh Dickins 		 */
2892e2d12e22SHugh Dickins 		if (signal_pending(current))
2893e2d12e22SHugh Dickins 			error = -EINTR;
28941aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
28951aac1400SHugh Dickins 			error = -ENOMEM;
2896e2d12e22SHugh Dickins 		else
28979e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2898e2d12e22SHugh Dickins 		if (error) {
28991635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
29007f556567SHugh Dickins 			if (index > start) {
29011635f6a7SHugh Dickins 				shmem_undo_range(inode,
290209cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2903b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
29047f556567SHugh Dickins 			}
29051aac1400SHugh Dickins 			goto undone;
2906e2d12e22SHugh Dickins 		}
2907e2d12e22SHugh Dickins 
2908e2d12e22SHugh Dickins 		/*
29091aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
29101aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
29111aac1400SHugh Dickins 		 */
29121aac1400SHugh Dickins 		shmem_falloc.next++;
29131aac1400SHugh Dickins 		if (!PageUptodate(page))
29141aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
29151aac1400SHugh Dickins 
29161aac1400SHugh Dickins 		/*
29171635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
29181635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
29191635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2920e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2921e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2922e2d12e22SHugh Dickins 		 */
2923e2d12e22SHugh Dickins 		set_page_dirty(page);
2924e2d12e22SHugh Dickins 		unlock_page(page);
292509cbfeafSKirill A. Shutemov 		put_page(page);
2926e2d12e22SHugh Dickins 		cond_resched();
2927e2d12e22SHugh Dickins 	}
2928e2d12e22SHugh Dickins 
2929e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2930e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2931078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
29321aac1400SHugh Dickins undone:
29331aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
29341aac1400SHugh Dickins 	inode->i_private = NULL;
29351aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2936e2d12e22SHugh Dickins out:
29375955102cSAl Viro 	inode_unlock(inode);
293883e4fa9cSHugh Dickins 	return error;
293983e4fa9cSHugh Dickins }
294083e4fa9cSHugh Dickins 
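/*
 * Illustrative userspace use of the two modes handled above (editorial
 * sketch):
 *
 *	fallocate(fd, 0, 0, 1 << 20);	preallocate 1M; requests larger
 *					than max_blocks fail fast with
 *					-ENOSPC before any allocation
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);		free the pages covered
 *
 * Any other mode bits yield -EOPNOTSUPP, and the VFS itself insists
 * that KEEP_SIZE accompany PUNCH_HOLE before this function is called.
 */
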
2941726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
29421da177e4SLinus Torvalds {
2943726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
29441da177e4SLinus Torvalds 
29451da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
294609cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
29471da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
29480edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
29491da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
295041ffe5d5SHugh Dickins 		buf->f_bavail =
295141ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
295241ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
29530edd73b3SHugh Dickins 	}
29540edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
29551da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
29561da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
29571da177e4SLinus Torvalds 	}
29581da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
29591da177e4SLinus Torvalds 	return 0;
29601da177e4SLinus Torvalds }
29611da177e4SLinus Torvalds 
29621da177e4SLinus Torvalds /*
29631da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done.
29641da177e4SLinus Torvalds  */
29651da177e4SLinus Torvalds static int
29661a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
29671da177e4SLinus Torvalds {
29680b0a0806SHugh Dickins 	struct inode *inode;
29691da177e4SLinus Torvalds 	int error = -ENOSPC;
29701da177e4SLinus Torvalds 
2971454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
29721da177e4SLinus Torvalds 	if (inode) {
2973feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2974feda821eSChristoph Hellwig 		if (error)
2975feda821eSChristoph Hellwig 			goto out_iput;
29762a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
29779d8f13baSMimi Zohar 						     &dentry->d_name,
29786d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2979feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2980feda821eSChristoph Hellwig 			goto out_iput;
298137ec43cdSMimi Zohar 
2982718deb6bSAl Viro 		error = 0;
29831da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2984078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
29851da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
29861da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
29871da177e4SLinus Torvalds 	}
29881da177e4SLinus Torvalds 	return error;
2989feda821eSChristoph Hellwig out_iput:
2990feda821eSChristoph Hellwig 	iput(inode);
2991feda821eSChristoph Hellwig 	return error;
29921da177e4SLinus Torvalds }
29931da177e4SLinus Torvalds 
299460545d0dSAl Viro static int
299560545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
299660545d0dSAl Viro {
299760545d0dSAl Viro 	struct inode *inode;
299860545d0dSAl Viro 	int error = -ENOSPC;
299960545d0dSAl Viro 
300060545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
300160545d0dSAl Viro 	if (inode) {
300260545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
300360545d0dSAl Viro 						     NULL,
300460545d0dSAl Viro 						     shmem_initxattrs, NULL);
3005feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
3006feda821eSChristoph Hellwig 			goto out_iput;
3007feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
3008feda821eSChristoph Hellwig 		if (error)
3009feda821eSChristoph Hellwig 			goto out_iput;
301060545d0dSAl Viro 		d_tmpfile(dentry, inode);
301160545d0dSAl Viro 	}
301260545d0dSAl Viro 	return error;
3013feda821eSChristoph Hellwig out_iput:
3014feda821eSChristoph Hellwig 	iput(inode);
3015feda821eSChristoph Hellwig 	return error;
301660545d0dSAl Viro }
301760545d0dSAl Viro 
301818bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
30191da177e4SLinus Torvalds {
30201da177e4SLinus Torvalds 	int error;
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
30231da177e4SLinus Torvalds 		return error;
3024d8c76e6fSDave Hansen 	inc_nlink(dir);
30251da177e4SLinus Torvalds 	return 0;
30261da177e4SLinus Torvalds }
30271da177e4SLinus Torvalds 
30284acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
3029ebfc3b49SAl Viro 		bool excl)
30301da177e4SLinus Torvalds {
30311da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
30321da177e4SLinus Torvalds }
30331da177e4SLinus Torvalds 
30341da177e4SLinus Torvalds /*
30351da177e4SLinus Torvalds  * Link a file.
30361da177e4SLinus Torvalds  */
30371da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
30381da177e4SLinus Torvalds {
303975c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30405b04c689SPavel Emelyanov 	int ret;
30411da177e4SLinus Torvalds 
30421da177e4SLinus Torvalds 	/*
30431da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
30441da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
30451da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
30461da177e4SLinus Torvalds 	 */
30475b04c689SPavel Emelyanov 	ret = shmem_reserve_inode(inode->i_sb);
30485b04c689SPavel Emelyanov 	if (ret)
30495b04c689SPavel Emelyanov 		goto out;
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3052078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3053d8c76e6fSDave Hansen 	inc_nlink(inode);
30547de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
30551da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
30561da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30575b04c689SPavel Emelyanov out:
30585b04c689SPavel Emelyanov 	return ret;
30591da177e4SLinus Torvalds }
30601da177e4SLinus Torvalds 
30611da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
30621da177e4SLinus Torvalds {
306375c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
30641da177e4SLinus Torvalds 
30655b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
30665b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
30671da177e4SLinus Torvalds 
30681da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
3069078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
30709a53c3a7SDave Hansen 	drop_nlink(inode);
30711da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
30721da177e4SLinus Torvalds 	return 0;
30731da177e4SLinus Torvalds }
30741da177e4SLinus Torvalds 
30751da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
30761da177e4SLinus Torvalds {
30771da177e4SLinus Torvalds 	if (!simple_empty(dentry))
30781da177e4SLinus Torvalds 		return -ENOTEMPTY;
30791da177e4SLinus Torvalds 
308075c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
30819a53c3a7SDave Hansen 	drop_nlink(dir);
30821da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
30831da177e4SLinus Torvalds }
30841da177e4SLinus Torvalds 
308537456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
308637456771SMiklos Szeredi {
3087e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
3088e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
308937456771SMiklos Szeredi 
309037456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
309137456771SMiklos Szeredi 		if (old_is_dir) {
309237456771SMiklos Szeredi 			drop_nlink(old_dir);
309337456771SMiklos Szeredi 			inc_nlink(new_dir);
309437456771SMiklos Szeredi 		} else {
309537456771SMiklos Szeredi 			drop_nlink(new_dir);
309637456771SMiklos Szeredi 			inc_nlink(old_dir);
309737456771SMiklos Szeredi 		}
309837456771SMiklos Szeredi 	}
309937456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
310037456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
310175c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
3102078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
310337456771SMiklos Szeredi 
310437456771SMiklos Szeredi 	return 0;
310537456771SMiklos Szeredi }
310637456771SMiklos Szeredi 
310746fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
310846fdb794SMiklos Szeredi {
310946fdb794SMiklos Szeredi 	struct dentry *whiteout;
311046fdb794SMiklos Szeredi 	int error;
311146fdb794SMiklos Szeredi 
311246fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
311346fdb794SMiklos Szeredi 	if (!whiteout)
311446fdb794SMiklos Szeredi 		return -ENOMEM;
311546fdb794SMiklos Szeredi 
311646fdb794SMiklos Szeredi 	error = shmem_mknod(old_dir, whiteout,
311746fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
311846fdb794SMiklos Szeredi 	dput(whiteout);
311946fdb794SMiklos Szeredi 	if (error)
312046fdb794SMiklos Szeredi 		return error;
312146fdb794SMiklos Szeredi 
312246fdb794SMiklos Szeredi 	/*
312346fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
312446fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
312546fdb794SMiklos Szeredi 	 *
312646fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
312746fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
312846fdb794SMiklos Szeredi 	 */
312946fdb794SMiklos Szeredi 	d_rehash(whiteout);
313046fdb794SMiklos Szeredi 	return 0;
313146fdb794SMiklos Szeredi }
313246fdb794SMiklos Szeredi 
31331da177e4SLinus Torvalds /*
31341da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
31351da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
31361da177e4SLinus Torvalds  * it exists so that the VFS layer correctly free's it when it
31371da177e4SLinus Torvalds  * gets overwritten.
31381da177e4SLinus Torvalds  */
31393b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
31401da177e4SLinus Torvalds {
314175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
31421da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
31431da177e4SLinus Torvalds 
314446fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
31453b69ff51SMiklos Szeredi 		return -EINVAL;
31463b69ff51SMiklos Szeredi 
314737456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
314837456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
314937456771SMiklos Szeredi 
31501da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
31511da177e4SLinus Torvalds 		return -ENOTEMPTY;
31521da177e4SLinus Torvalds 
315346fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
315446fdb794SMiklos Szeredi 		int error;
315546fdb794SMiklos Szeredi 
315646fdb794SMiklos Szeredi 		error = shmem_whiteout(old_dir, old_dentry);
315746fdb794SMiklos Szeredi 		if (error)
315846fdb794SMiklos Szeredi 			return error;
315946fdb794SMiklos Szeredi 	}
316046fdb794SMiklos Szeredi 
316175c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
31621da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3163b928095bSMiklos Szeredi 		if (they_are_dirs) {
316475c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
31659a53c3a7SDave Hansen 			drop_nlink(old_dir);
3166b928095bSMiklos Szeredi 		}
31671da177e4SLinus Torvalds 	} else if (they_are_dirs) {
31689a53c3a7SDave Hansen 		drop_nlink(old_dir);
3169d8c76e6fSDave Hansen 		inc_nlink(new_dir);
31701da177e4SLinus Torvalds 	}
31711da177e4SLinus Torvalds 
31721da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
31731da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
31741da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
31751da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3176078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
31771da177e4SLinus Torvalds 	return 0;
31781da177e4SLinus Torvalds }
31791da177e4SLinus Torvalds 
31801da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
31811da177e4SLinus Torvalds {
31821da177e4SLinus Torvalds 	int error;
31831da177e4SLinus Torvalds 	int len;
31841da177e4SLinus Torvalds 	struct inode *inode;
31859276aad6SHugh Dickins 	struct page *page;
31861da177e4SLinus Torvalds 	struct shmem_inode_info *info;
31871da177e4SLinus Torvalds 
31881da177e4SLinus Torvalds 	len = strlen(symname) + 1;
318909cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
31901da177e4SLinus Torvalds 		return -ENAMETOOLONG;
31911da177e4SLinus Torvalds 
3192454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
31931da177e4SLinus Torvalds 	if (!inode)
31941da177e4SLinus Torvalds 		return -ENOSPC;
31951da177e4SLinus Torvalds 
31969d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
31976d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3198570bc1c2SStephen Smalley 	if (error) {
3199570bc1c2SStephen Smalley 		if (error != -EOPNOTSUPP) {
3200570bc1c2SStephen Smalley 			iput(inode);
3201570bc1c2SStephen Smalley 			return error;
3202570bc1c2SStephen Smalley 		}
3203570bc1c2SStephen Smalley 		error = 0;
3204570bc1c2SStephen Smalley 	}
3205570bc1c2SStephen Smalley 
32061da177e4SLinus Torvalds 	info = SHMEM_I(inode);
32071da177e4SLinus Torvalds 	inode->i_size = len-1;
320869f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
32093ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
32103ed47db3SAl Viro 		if (!inode->i_link) {
321169f07ec9SHugh Dickins 			iput(inode);
321269f07ec9SHugh Dickins 			return -ENOMEM;
321369f07ec9SHugh Dickins 		}
321469f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
32151da177e4SLinus Torvalds 	} else {
3216e8ecde25SAl Viro 		inode_nohighmem(inode);
32179e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
32181da177e4SLinus Torvalds 		if (error) {
32191da177e4SLinus Torvalds 			iput(inode);
32201da177e4SLinus Torvalds 			return error;
32211da177e4SLinus Torvalds 		}
322214fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
32231da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
322421fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3225ec9516fbSHugh Dickins 		SetPageUptodate(page);
32261da177e4SLinus Torvalds 		set_page_dirty(page);
32276746aff7SWu Fengguang 		unlock_page(page);
322809cbfeafSKirill A. Shutemov 		put_page(page);
32291da177e4SLinus Torvalds 	}
32301da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3231078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
32321da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
32331da177e4SLinus Torvalds 	dget(dentry);
32341da177e4SLinus Torvalds 	return 0;
32351da177e4SLinus Torvalds }
32361da177e4SLinus Torvalds 
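/*
 * Two storage strategies are used above: a target of up to
 * SHORT_SYMLINK_LEN bytes is kmemdup()ed into inode->i_link and freed in
 * shmem_destroy_callback(), while anything longer is written into page 0
 * of the inode's mapping, so a long symlink target can be swapped out
 * like any other tmpfs page.
 */
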
3237fceef393SAl Viro static void shmem_put_link(void *arg)
3238fceef393SAl Viro {
3239fceef393SAl Viro 	mark_page_accessed(arg);
3240fceef393SAl Viro 	put_page(arg);
3241fceef393SAl Viro }
3242fceef393SAl Viro 
32436b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3244fceef393SAl Viro 				  struct inode *inode,
3245fceef393SAl Viro 				  struct delayed_call *done)
32461da177e4SLinus Torvalds {
32471da177e4SLinus Torvalds 	struct page *page = NULL;
32486b255391SAl Viro 	int error;
32496a6c9904SAl Viro 	if (!dentry) {
32506a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
32516a6c9904SAl Viro 		if (!page)
32526b255391SAl Viro 			return ERR_PTR(-ECHILD);
32536a6c9904SAl Viro 		if (!PageUptodate(page)) {
32546a6c9904SAl Viro 			put_page(page);
32556a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
32566a6c9904SAl Viro 		}
32576a6c9904SAl Viro 	} else {
32589e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3259680baacbSAl Viro 		if (error)
3260680baacbSAl Viro 			return ERR_PTR(error);
3261d3602444SHugh Dickins 		unlock_page(page);
32621da177e4SLinus Torvalds 	}
3263fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
326421fc61c7SAl Viro 	return page_address(page);
32651da177e4SLinus Torvalds }
32661da177e4SLinus Torvalds 
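/*
 * Note on the !dentry case above: ->get_link() is called with a NULL
 * dentry from RCU-walk (lockless) path lookup, where sleeping is not
 * allowed; only a page already present and uptodate may be used, and
 * -ECHILD asks the VFS to retry in ref-walk mode, where shmem_getpage()
 * is free to sleep.
 */
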
3267b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3268b09e0fa4SEric Paris /*
3269b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3270b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3271b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3272b09e0fa4SEric Paris  * filesystem level, though.
3273b09e0fa4SEric Paris  */
3274b09e0fa4SEric Paris 
32756d9d88d0SJarkko Sakkinen /*
32766d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
32776d9d88d0SJarkko Sakkinen  */
32786d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
32796d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
32806d9d88d0SJarkko Sakkinen 			    void *fs_info)
32816d9d88d0SJarkko Sakkinen {
32826d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
32836d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
328438f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
32856d9d88d0SJarkko Sakkinen 	size_t len;
32866d9d88d0SJarkko Sakkinen 
32876d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
328838f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
32896d9d88d0SJarkko Sakkinen 		if (!new_xattr)
32906d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32916d9d88d0SJarkko Sakkinen 
32926d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
32936d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
32946d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
32956d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
32966d9d88d0SJarkko Sakkinen 			kfree(new_xattr);
32976d9d88d0SJarkko Sakkinen 			return -ENOMEM;
32986d9d88d0SJarkko Sakkinen 		}
32996d9d88d0SJarkko Sakkinen 
33006d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
33016d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
33026d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
33036d9d88d0SJarkko Sakkinen 		       xattr->name, len);
33046d9d88d0SJarkko Sakkinen 
330538f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
33066d9d88d0SJarkko Sakkinen 	}
33076d9d88d0SJarkko Sakkinen 
33086d9d88d0SJarkko Sakkinen 	return 0;
33096d9d88d0SJarkko Sakkinen }
33106d9d88d0SJarkko Sakkinen 
3311aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3312b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3313b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3314aa7c5241SAndreas Gruenbacher {
3315b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3316aa7c5241SAndreas Gruenbacher 
3317aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3318aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3319aa7c5241SAndreas Gruenbacher }
3320aa7c5241SAndreas Gruenbacher 
3321aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
332259301226SAl Viro 				   struct dentry *unused, struct inode *inode,
332359301226SAl Viro 				   const char *name, const void *value,
332459301226SAl Viro 				   size_t size, int flags)
3325aa7c5241SAndreas Gruenbacher {
332659301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3327aa7c5241SAndreas Gruenbacher 
3328aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3329aa7c5241SAndreas Gruenbacher 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
3330aa7c5241SAndreas Gruenbacher }
3331aa7c5241SAndreas Gruenbacher 
3332aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3333aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3334aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3335aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3336aa7c5241SAndreas Gruenbacher };
3337aa7c5241SAndreas Gruenbacher 
3338aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3339aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3340aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3341aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3342aa7c5241SAndreas Gruenbacher };
3343aa7c5241SAndreas Gruenbacher 
3344b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3345b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3346feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3347feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3348b09e0fa4SEric Paris #endif
3349aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3350aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3351b09e0fa4SEric Paris 	NULL
3352b09e0fa4SEric Paris };
3353b09e0fa4SEric Paris 
3354b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3355b09e0fa4SEric Paris {
335675c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3357786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3358b09e0fa4SEric Paris }
3359b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3360b09e0fa4SEric Paris 
336169f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
33626b255391SAl Viro 	.get_link	= simple_get_link,
3363b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3364b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3365b09e0fa4SEric Paris #endif
33661da177e4SLinus Torvalds };
33671da177e4SLinus Torvalds 
336892e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
33696b255391SAl Viro 	.get_link	= shmem_get_link,
3370b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3371b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
337239f0247dSAndreas Gruenbacher #endif
3373b09e0fa4SEric Paris };
337439f0247dSAndreas Gruenbacher 
337591828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
337691828a40SDavid M. Grimes {
337791828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
337891828a40SDavid M. Grimes }
337991828a40SDavid M. Grimes 
338091828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
338191828a40SDavid M. Grimes {
338291828a40SDavid M. Grimes 	__u32 *fh = vfh;
338391828a40SDavid M. Grimes 	__u64 inum = fh[2];
338491828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
338591828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
338691828a40SDavid M. Grimes }
338791828a40SDavid M. Grimes 
3388480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3389480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
339091828a40SDavid M. Grimes {
339191828a40SDavid M. Grimes 	struct inode *inode;
3392480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
339335c2a7f4SHugh Dickins 	u64 inum;
339491828a40SDavid M. Grimes 
3395480b116cSChristoph Hellwig 	if (fh_len < 3)
3396480b116cSChristoph Hellwig 		return NULL;
3397480b116cSChristoph Hellwig 
339835c2a7f4SHugh Dickins 	inum = fid->raw[2];
339935c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
340035c2a7f4SHugh Dickins 
3401480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3402480b116cSChristoph Hellwig 			shmem_match, fid->raw);
340391828a40SDavid M. Grimes 	if (inode) {
3404480b116cSChristoph Hellwig 		dentry = d_find_alias(inode);
340591828a40SDavid M. Grimes 		iput(inode);
340691828a40SDavid M. Grimes 	}
340791828a40SDavid M. Grimes 
3408480b116cSChristoph Hellwig 	return dentry;
340991828a40SDavid M. Grimes }
341091828a40SDavid M. Grimes 
3411b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3412b0b0382bSAl Viro 				struct inode *parent)
341391828a40SDavid M. Grimes {
34145fe0c237SAneesh Kumar K.V 	if (*len < 3) {
34155fe0c237SAneesh Kumar K.V 		*len = 3;
341694e07a75SNamjae Jeon 		return FILEID_INVALID;
34175fe0c237SAneesh Kumar K.V 	}
341891828a40SDavid M. Grimes 
34191d3382cbSAl Viro 	if (inode_unhashed(inode)) {
342091828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
342191828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
342291828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
342391828a40SDavid M. Grimes 	 * to do it once.
342491828a40SDavid M. Grimes 		 */
342591828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
342691828a40SDavid M. Grimes 		spin_lock(&lock);
34271d3382cbSAl Viro 		if (inode_unhashed(inode))
342891828a40SDavid M. Grimes 			__insert_inode_hash(inode,
342991828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
343091828a40SDavid M. Grimes 		spin_unlock(&lock);
343191828a40SDavid M. Grimes 	}
343291828a40SDavid M. Grimes 
343391828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
343491828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
343591828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
343691828a40SDavid M. Grimes 
343791828a40SDavid M. Grimes 	*len = 3;
343891828a40SDavid M. Grimes 	return 1;
343991828a40SDavid M. Grimes }
344091828a40SDavid M. Grimes 
344139655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
344291828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
344391828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3444480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
344591828a40SDavid M. Grimes };
344691828a40SDavid M. Grimes 
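/*
 * The resulting file handle layout, as encoded by shmem_encode_fh() and
 * decoded by shmem_match()/shmem_fh_to_dentry():
 *
 *	fh[0] = i_generation
 *	fh[1] = i_ino, low 32 bits
 *	fh[2] = i_ino, high 32 bits
 *
 * Userspace can obtain and reopen such handles via name_to_handle_at(2)
 * and open_by_handle_at(2) (illustrative; requires CONFIG_FHANDLE, and
 * CAP_DAC_READ_SEARCH for the latter).
 */
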
3447680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3448680d794bSakpm@linux-foundation.org 			       bool remount)
34491da177e4SLinus Torvalds {
34501da177e4SLinus Torvalds 	char *this_char, *value, *rest;
345149cd0a5cSGreg Thelen 	struct mempolicy *mpol = NULL;
34528751e039SEric W. Biederman 	uid_t uid;
34538751e039SEric W. Biederman 	gid_t gid;
34541da177e4SLinus Torvalds 
3455b00dc3adSHugh Dickins 	while (options != NULL) {
3456b00dc3adSHugh Dickins 		this_char = options;
3457b00dc3adSHugh Dickins 		for (;;) {
3458b00dc3adSHugh Dickins 			/*
3459b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3460b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3461b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3462b00dc3adSHugh Dickins 			 */
3463b00dc3adSHugh Dickins 			options = strchr(options, ',');
3464b00dc3adSHugh Dickins 			if (options == NULL)
3465b00dc3adSHugh Dickins 				break;
3466b00dc3adSHugh Dickins 			options++;
3467b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3468b00dc3adSHugh Dickins 				options[-1] = '\0';
3469b00dc3adSHugh Dickins 				break;
3470b00dc3adSHugh Dickins 			}
3471b00dc3adSHugh Dickins 		}
34721da177e4SLinus Torvalds 		if (!*this_char)
34731da177e4SLinus Torvalds 			continue;
34741da177e4SLinus Torvalds 		if ((value = strchr(this_char,'=')) != NULL) {
34751da177e4SLinus Torvalds 			*value++ = 0;
34761da177e4SLinus Torvalds 		} else {
34771170532bSJoe Perches 			pr_err("tmpfs: No value for mount option '%s'\n",
34781da177e4SLinus Torvalds 			       this_char);
347949cd0a5cSGreg Thelen 			goto error;
34801da177e4SLinus Torvalds 		}
34811da177e4SLinus Torvalds 
34821da177e4SLinus Torvalds 		if (!strcmp(this_char,"size")) {
34831da177e4SLinus Torvalds 			unsigned long long size;
34841da177e4SLinus Torvalds 			size = memparse(value,&rest);
34851da177e4SLinus Torvalds 			if (*rest == '%') {
34861da177e4SLinus Torvalds 				size <<= PAGE_SHIFT;
34871da177e4SLinus Torvalds 				size *= totalram_pages;
34881da177e4SLinus Torvalds 				do_div(size, 100);
34891da177e4SLinus Torvalds 				rest++;
34901da177e4SLinus Torvalds 			}
34911da177e4SLinus Torvalds 			if (*rest)
34921da177e4SLinus Torvalds 				goto bad_val;
3493680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks =
349409cbfeafSKirill A. Shutemov 				DIV_ROUND_UP(size, PAGE_SIZE);
34951da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_blocks")) {
3496680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks = memparse(value, &rest);
34971da177e4SLinus Torvalds 			if (*rest)
34981da177e4SLinus Torvalds 				goto bad_val;
34991da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_inodes")) {
3500680d794bSakpm@linux-foundation.org 			sbinfo->max_inodes = memparse(value, &rest);
35011da177e4SLinus Torvalds 			if (*rest)
35021da177e4SLinus Torvalds 				goto bad_val;
35031da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"mode")) {
3504680d794bSakpm@linux-foundation.org 			if (remount)
35051da177e4SLinus Torvalds 				continue;
3506680d794bSakpm@linux-foundation.org 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
35071da177e4SLinus Torvalds 			if (*rest)
35081da177e4SLinus Torvalds 				goto bad_val;
35091da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"uid")) {
3510680d794bSakpm@linux-foundation.org 			if (remount)
35111da177e4SLinus Torvalds 				continue;
35128751e039SEric W. Biederman 			uid = simple_strtoul(value, &rest, 0);
35131da177e4SLinus Torvalds 			if (*rest)
35141da177e4SLinus Torvalds 				goto bad_val;
35158751e039SEric W. Biederman 			sbinfo->uid = make_kuid(current_user_ns(), uid);
35168751e039SEric W. Biederman 			if (!uid_valid(sbinfo->uid))
35178751e039SEric W. Biederman 				goto bad_val;
35181da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"gid")) {
3519680d794bSakpm@linux-foundation.org 			if (remount)
35201da177e4SLinus Torvalds 				continue;
35218751e039SEric W. Biederman 			gid = simple_strtoul(value, &rest, 0);
35221da177e4SLinus Torvalds 			if (*rest)
35231da177e4SLinus Torvalds 				goto bad_val;
35248751e039SEric W. Biederman 			sbinfo->gid = make_kgid(current_user_ns(), gid);
35258751e039SEric W. Biederman 			if (!gid_valid(sbinfo->gid))
35268751e039SEric W. Biederman 				goto bad_val;
3527e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
35285a6e75f8SKirill A. Shutemov 		} else if (!strcmp(this_char, "huge")) {
35295a6e75f8SKirill A. Shutemov 			int huge;
35305a6e75f8SKirill A. Shutemov 			huge = shmem_parse_huge(value);
35315a6e75f8SKirill A. Shutemov 			if (huge < 0)
35325a6e75f8SKirill A. Shutemov 				goto bad_val;
35335a6e75f8SKirill A. Shutemov 			if (!has_transparent_hugepage() &&
35345a6e75f8SKirill A. Shutemov 					huge != SHMEM_HUGE_NEVER)
35355a6e75f8SKirill A. Shutemov 				goto bad_val;
35365a6e75f8SKirill A. Shutemov 			sbinfo->huge = huge;
35375a6e75f8SKirill A. Shutemov #endif
35385a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA
35397339ff83SRobin Holt 		} else if (!strcmp(this_char,"mpol")) {
354049cd0a5cSGreg Thelen 			mpol_put(mpol);
354149cd0a5cSGreg Thelen 			mpol = NULL;
354249cd0a5cSGreg Thelen 			if (mpol_parse_str(value, &mpol))
35437339ff83SRobin Holt 				goto bad_val;
35445a6e75f8SKirill A. Shutemov #endif
35451da177e4SLinus Torvalds 		} else {
35461170532bSJoe Perches 			pr_err("tmpfs: Bad mount option %s\n", this_char);
354749cd0a5cSGreg Thelen 			goto error;
35481da177e4SLinus Torvalds 		}
35491da177e4SLinus Torvalds 	}
355049cd0a5cSGreg Thelen 	sbinfo->mpol = mpol;
35511da177e4SLinus Torvalds 	return 0;
35521da177e4SLinus Torvalds 
35531da177e4SLinus Torvalds bad_val:
35541170532bSJoe Perches 	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
35551da177e4SLinus Torvalds 	       value, this_char);
355649cd0a5cSGreg Thelen error:
355749cd0a5cSGreg Thelen 	mpol_put(mpol);
35581da177e4SLinus Torvalds 	return 1;
35591da177e4SLinus Torvalds 
35601da177e4SLinus Torvalds }
35611da177e4SLinus Torvalds 
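/*
 * Example of the option string this parser accepts (illustrative only):
 *
 *	mount("tmpfs", "/mnt", "tmpfs", 0,
 *	      "size=50%,nr_inodes=1m,mode=1777,mpol=bind:0-1,3");
 *
 * "size=50%" takes the totalram_pages branch above, memparse() handles
 * the "1m" suffix for nr_inodes, and the trailing "bind:0-1,3" nodelist
 * is why the comma scan must skip digits instead of splitting on every
 * comma.
 */
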
35621da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
35631da177e4SLinus Torvalds {
35641da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3565680d794bSakpm@linux-foundation.org 	struct shmem_sb_info config = *sbinfo;
35660edd73b3SHugh Dickins 	unsigned long inodes;
35670edd73b3SHugh Dickins 	int error = -EINVAL;
35681da177e4SLinus Torvalds 
35695f00110fSGreg Thelen 	config.mpol = NULL;
3570680d794bSakpm@linux-foundation.org 	if (shmem_parse_options(data, &config, true))
35710edd73b3SHugh Dickins 		return error;
35720edd73b3SHugh Dickins 
35730edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
35740edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
35757e496299STim Chen 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
35760edd73b3SHugh Dickins 		goto out;
3577680d794bSakpm@linux-foundation.org 	if (config.max_inodes < inodes)
35780edd73b3SHugh Dickins 		goto out;
35790edd73b3SHugh Dickins 	/*
358054af6042SHugh Dickins 	 * Those tests disallow limited->unlimited while any blocks or inodes are in use;
35810edd73b3SHugh Dickins 	 * but we must separately disallow unlimited->limited, because
35820edd73b3SHugh Dickins 	 * in that case we have no record of how much is already in use.
35830edd73b3SHugh Dickins 	 */
3584680d794bSakpm@linux-foundation.org 	if (config.max_blocks && !sbinfo->max_blocks)
35850edd73b3SHugh Dickins 		goto out;
3586680d794bSakpm@linux-foundation.org 	if (config.max_inodes && !sbinfo->max_inodes)
35870edd73b3SHugh Dickins 		goto out;
35880edd73b3SHugh Dickins 
35890edd73b3SHugh Dickins 	error = 0;
35905a6e75f8SKirill A. Shutemov 	sbinfo->huge = config.huge;
3591680d794bSakpm@linux-foundation.org 	sbinfo->max_blocks  = config.max_blocks;
3592680d794bSakpm@linux-foundation.org 	sbinfo->max_inodes  = config.max_inodes;
3593680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = config.max_inodes - inodes;
359471fe804bSLee Schermerhorn 
35955f00110fSGreg Thelen 	/*
35965f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
35975f00110fSGreg Thelen 	 */
35985f00110fSGreg Thelen 	if (config.mpol) {
359971fe804bSLee Schermerhorn 		mpol_put(sbinfo->mpol);
360071fe804bSLee Schermerhorn 		sbinfo->mpol = config.mpol;	/* transfers initial ref */
36015f00110fSGreg Thelen 	}
36020edd73b3SHugh Dickins out:
36030edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
36040edd73b3SHugh Dickins 	return error;
36051da177e4SLinus Torvalds }
3606680d794bSakpm@linux-foundation.org 
360734c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3608680d794bSakpm@linux-foundation.org {
360934c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3610680d794bSakpm@linux-foundation.org 
3611680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3612680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
361309cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3614680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3615680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3616680d794bSakpm@linux-foundation.org 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
361709208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
36188751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
36198751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
36208751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
36218751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
36228751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
36238751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3624e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
36255a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
36265a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
36275a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
36285a6e75f8SKirill A. Shutemov #endif
362971fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3630680d794bSakpm@linux-foundation.org 	return 0;
3631680d794bSakpm@linux-foundation.org }
36329183df25SDavid Herrmann 
36339183df25SDavid Herrmann #define MFD_NAME_PREFIX "memfd:"
36349183df25SDavid Herrmann #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
36359183df25SDavid Herrmann #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
36369183df25SDavid Herrmann 
36379183df25SDavid Herrmann #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
36389183df25SDavid Herrmann 
36399183df25SDavid Herrmann SYSCALL_DEFINE2(memfd_create,
36409183df25SDavid Herrmann 		const char __user *, uname,
36419183df25SDavid Herrmann 		unsigned int, flags)
36429183df25SDavid Herrmann {
36439183df25SDavid Herrmann 	struct shmem_inode_info *info;
36449183df25SDavid Herrmann 	struct file *file;
36459183df25SDavid Herrmann 	int fd, error;
36469183df25SDavid Herrmann 	char *name;
36479183df25SDavid Herrmann 	long len;
36489183df25SDavid Herrmann 
36499183df25SDavid Herrmann 	if (flags & ~(unsigned int)MFD_ALL_FLAGS)
36509183df25SDavid Herrmann 		return -EINVAL;
36519183df25SDavid Herrmann 
36529183df25SDavid Herrmann 	/* length includes terminating zero */
36539183df25SDavid Herrmann 	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
36549183df25SDavid Herrmann 	if (len <= 0)
36559183df25SDavid Herrmann 		return -EFAULT;
36569183df25SDavid Herrmann 	if (len > MFD_NAME_MAX_LEN + 1)
36579183df25SDavid Herrmann 		return -EINVAL;
36589183df25SDavid Herrmann 
36599183df25SDavid Herrmann 	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
36609183df25SDavid Herrmann 	if (!name)
36619183df25SDavid Herrmann 		return -ENOMEM;
36629183df25SDavid Herrmann 
36639183df25SDavid Herrmann 	strcpy(name, MFD_NAME_PREFIX);
36649183df25SDavid Herrmann 	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
36659183df25SDavid Herrmann 		error = -EFAULT;
36669183df25SDavid Herrmann 		goto err_name;
36679183df25SDavid Herrmann 	}
36689183df25SDavid Herrmann 
36699183df25SDavid Herrmann 	/* terminating-zero may have changed after strnlen_user() returned */
36709183df25SDavid Herrmann 	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
36719183df25SDavid Herrmann 		error = -EFAULT;
36729183df25SDavid Herrmann 		goto err_name;
36739183df25SDavid Herrmann 	}
36749183df25SDavid Herrmann 
36759183df25SDavid Herrmann 	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
36769183df25SDavid Herrmann 	if (fd < 0) {
36779183df25SDavid Herrmann 		error = fd;
36789183df25SDavid Herrmann 		goto err_name;
36799183df25SDavid Herrmann 	}
36809183df25SDavid Herrmann 
36819183df25SDavid Herrmann 	file = shmem_file_setup(name, 0, VM_NORESERVE);
36829183df25SDavid Herrmann 	if (IS_ERR(file)) {
36839183df25SDavid Herrmann 		error = PTR_ERR(file);
36849183df25SDavid Herrmann 		goto err_fd;
36859183df25SDavid Herrmann 	}
36869183df25SDavid Herrmann 	info = SHMEM_I(file_inode(file));
36879183df25SDavid Herrmann 	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
36889183df25SDavid Herrmann 	file->f_flags |= O_RDWR | O_LARGEFILE;
36899183df25SDavid Herrmann 	if (flags & MFD_ALLOW_SEALING)
36909183df25SDavid Herrmann 		info->seals &= ~F_SEAL_SEAL;
36919183df25SDavid Herrmann 
36929183df25SDavid Herrmann 	fd_install(fd, file);
36939183df25SDavid Herrmann 	kfree(name);
36949183df25SDavid Herrmann 	return fd;
36959183df25SDavid Herrmann 
36969183df25SDavid Herrmann err_fd:
36979183df25SDavid Herrmann 	put_unused_fd(fd);
36989183df25SDavid Herrmann err_name:
36999183df25SDavid Herrmann 	kfree(name);
37009183df25SDavid Herrmann 	return error;
37019183df25SDavid Herrmann }
37029183df25SDavid Herrmann 
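/*
 * Minimal userspace sketch of the syscall above (assumes glibc exposes a
 * memfd_create() wrapper; otherwise syscall(__NR_memfd_create, ...)):
 *
 *	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	if (fd < 0)
 *		return -1;
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *
 * Passing MFD_ALLOW_SEALING clears F_SEAL_SEAL in info->seals above,
 * which is what allows the later F_ADD_SEALS fcntl to succeed.
 */
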
3703680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
37041da177e4SLinus Torvalds 
37051da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
37061da177e4SLinus Torvalds {
3707602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3708602586a8SHugh Dickins 
3709602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
371049cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3711602586a8SHugh Dickins 	kfree(sbinfo);
37121da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
37131da177e4SLinus Torvalds }
37141da177e4SLinus Torvalds 
37152b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent)
37161da177e4SLinus Torvalds {
37171da177e4SLinus Torvalds 	struct inode *inode;
37180edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3719680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3720680d794bSakpm@linux-foundation.org 
3721680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3722425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3723680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3724680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3725680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3726680d794bSakpm@linux-foundation.org 
3727680d794bSakpm@linux-foundation.org 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
372876aac0e9SDavid Howells 	sbinfo->uid = current_fsuid();
372976aac0e9SDavid Howells 	sbinfo->gid = current_fsgid();
3730680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
37311da177e4SLinus Torvalds 
37320edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
37331da177e4SLinus Torvalds 	/*
37341da177e4SLinus Torvalds 	 * By default we allow only half of the physical RAM per
37351da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
37361da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
37371da177e4SLinus Torvalds 	 */
3738ca4e0519SAl Viro 	if (!(sb->s_flags & MS_KERNMOUNT)) {
3739680d794bSakpm@linux-foundation.org 		sbinfo->max_blocks = shmem_default_max_blocks();
3740680d794bSakpm@linux-foundation.org 		sbinfo->max_inodes = shmem_default_max_inodes();
3741680d794bSakpm@linux-foundation.org 		if (shmem_parse_options(data, sbinfo, false)) {
3742680d794bSakpm@linux-foundation.org 			err = -EINVAL;
3743680d794bSakpm@linux-foundation.org 			goto failed;
3744680d794bSakpm@linux-foundation.org 		}
3745ca4e0519SAl Viro 	} else {
3746ca4e0519SAl Viro 		sb->s_flags |= MS_NOUSER;
37471da177e4SLinus Torvalds 	}
374891828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
37492f6e38f3SHugh Dickins 	sb->s_flags |= MS_NOSEC;
37500edd73b3SHugh Dickins #else
37510edd73b3SHugh Dickins 	sb->s_flags |= MS_NOUSER;
37520edd73b3SHugh Dickins #endif
37531da177e4SLinus Torvalds 
37541da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
3755908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3756602586a8SHugh Dickins 		goto failed;
3757680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = sbinfo->max_inodes;
3758779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3759779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
37601da177e4SLinus Torvalds 
3761285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
376209cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
376309cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
37641da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
37651da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3766cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3767b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
376839f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3769b09e0fa4SEric Paris #endif
3770b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
377139f0247dSAndreas Gruenbacher 	sb->s_flags |= MS_POSIXACL;
377239f0247dSAndreas Gruenbacher #endif
37732b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
37740edd73b3SHugh Dickins 
3775454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
37761da177e4SLinus Torvalds 	if (!inode)
37771da177e4SLinus Torvalds 		goto failed;
3778680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3779680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3780318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3781318ceed0SAl Viro 	if (!sb->s_root)
378248fde701SAl Viro 		goto failed;
37831da177e4SLinus Torvalds 	return 0;
37841da177e4SLinus Torvalds 
37851da177e4SLinus Torvalds failed:
37861da177e4SLinus Torvalds 	shmem_put_super(sb);
37871da177e4SLinus Torvalds 	return err;
37881da177e4SLinus Torvalds }
37891da177e4SLinus Torvalds 
3790fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
37911da177e4SLinus Torvalds 
37921da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
37931da177e4SLinus Torvalds {
379441ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
379541ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
379641ffe5d5SHugh Dickins 	if (!info)
37971da177e4SLinus Torvalds 		return NULL;
379841ffe5d5SHugh Dickins 	return &info->vfs_inode;
37991da177e4SLinus Torvalds }
38001da177e4SLinus Torvalds 
380141ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head)
3802fa0d7e3dSNick Piggin {
3803fa0d7e3dSNick Piggin 	struct inode *inode = container_of(head, struct inode, i_rcu);
380484e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
38053ed47db3SAl Viro 		kfree(inode->i_link);
3806fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3807fa0d7e3dSNick Piggin }
3808fa0d7e3dSNick Piggin 
38091da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
38101da177e4SLinus Torvalds {
381109208d15SAl Viro 	if (S_ISREG(inode->i_mode))
38121da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
381341ffe5d5SHugh Dickins 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
38141da177e4SLinus Torvalds }
38151da177e4SLinus Torvalds 
381641ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
38171da177e4SLinus Torvalds {
381841ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
381941ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
38201da177e4SLinus Torvalds }
38211da177e4SLinus Torvalds 
382241ffe5d5SHugh Dickins static int shmem_init_inodecache(void)
38231da177e4SLinus Torvalds {
38241da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
38251da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
38265d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
38271da177e4SLinus Torvalds 	return 0;
38281da177e4SLinus Torvalds }
38291da177e4SLinus Torvalds 
383041ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
38311da177e4SLinus Torvalds {
38321a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
38331da177e4SLinus Torvalds }
38341da177e4SLinus Torvalds 
3835f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
38361da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
383776719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
38381da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3839800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3840800d15a5SNick Piggin 	.write_end	= shmem_write_end,
38411da177e4SLinus Torvalds #endif
38421c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3843304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
38441c93923cSAndrew Morton #endif
3845aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
38461da177e4SLinus Torvalds };
38471da177e4SLinus Torvalds 
384815ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
38491da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3850c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
38511da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3852220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
38532ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
38548174202bSAl Viro 	.write_iter	= generic_file_write_iter,
38551b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
385682c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3857f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
385883e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
38591da177e4SLinus Torvalds #endif
38601da177e4SLinus Torvalds };
38611da177e4SLinus Torvalds 
386292e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
386344a30220SYu Zhao 	.getattr	= shmem_getattr,
386494c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3865b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3866b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3867feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3868b09e0fa4SEric Paris #endif
38691da177e4SLinus Torvalds };
38701da177e4SLinus Torvalds 
387192e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
38721da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
38731da177e4SLinus Torvalds 	.create		= shmem_create,
38741da177e4SLinus Torvalds 	.lookup		= simple_lookup,
38751da177e4SLinus Torvalds 	.link		= shmem_link,
38761da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
38771da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
38781da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
38791da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
38801da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
38812773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
388260545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
38831da177e4SLinus Torvalds #endif
3884b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3885b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3886b09e0fa4SEric Paris #endif
388739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
388894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3889feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
389039f0247dSAndreas Gruenbacher #endif
389139f0247dSAndreas Gruenbacher };
389239f0247dSAndreas Gruenbacher 
389392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3894b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3895b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3896b09e0fa4SEric Paris #endif
389739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
389894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3899feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
390039f0247dSAndreas Gruenbacher #endif
39011da177e4SLinus Torvalds };
39021da177e4SLinus Torvalds 
3903759b9775SHugh Dickins static const struct super_operations shmem_ops = {
39041da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
39051da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
39061da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
39071da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
39081da177e4SLinus Torvalds 	.remount_fs	= shmem_remount_fs,
3909680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
39101da177e4SLinus Torvalds #endif
39111f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
39121da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
39131da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3914779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3915779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3916779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3917779750d2SKirill A. Shutemov #endif
39181da177e4SLinus Torvalds };
39191da177e4SLinus Torvalds 
3920f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
392154cb8821SNick Piggin 	.fault		= shmem_fault,
3922d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
39231da177e4SLinus Torvalds #ifdef CONFIG_NUMA
39241da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
39251da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
39261da177e4SLinus Torvalds #endif
39271da177e4SLinus Torvalds };
39281da177e4SLinus Torvalds 
39293c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type,
39303c26ff6eSAl Viro 	int flags, const char *dev_name, void *data)
39311da177e4SLinus Torvalds {
39323c26ff6eSAl Viro 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
39331da177e4SLinus Torvalds }
39341da177e4SLinus Torvalds 
393541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
39361da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
39371da177e4SLinus Torvalds 	.name		= "tmpfs",
39383c26ff6eSAl Viro 	.mount		= shmem_mount,
39391da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
39402b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
39411da177e4SLinus Torvalds };
39421da177e4SLinus Torvalds 
394341ffe5d5SHugh Dickins int __init shmem_init(void)
39441da177e4SLinus Torvalds {
39451da177e4SLinus Torvalds 	int error;
39461da177e4SLinus Torvalds 
394716203a7aSRob Landley 	/* If rootfs called this, don't re-init */
394816203a7aSRob Landley 	if (shmem_inode_cachep)
394916203a7aSRob Landley 		return 0;
395016203a7aSRob Landley 
395141ffe5d5SHugh Dickins 	error = shmem_init_inodecache();
39521da177e4SLinus Torvalds 	if (error)
39531da177e4SLinus Torvalds 		goto out3;
39541da177e4SLinus Torvalds 
395541ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
39561da177e4SLinus Torvalds 	if (error) {
39571170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
39581da177e4SLinus Torvalds 		goto out2;
39591da177e4SLinus Torvalds 	}
396095dc112aSGreg Kroah-Hartman 
3961ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
39621da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
39631da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
39641170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
39651da177e4SLinus Torvalds 		goto out1;
39661da177e4SLinus Torvalds 	}
39675a6e75f8SKirill A. Shutemov 
3968e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3969435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
39705a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
39715a6e75f8SKirill A. Shutemov 	else
39725a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
39735a6e75f8SKirill A. Shutemov #endif
39741da177e4SLinus Torvalds 	return 0;
39751da177e4SLinus Torvalds 
39761da177e4SLinus Torvalds out1:
397741ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
39781da177e4SLinus Torvalds out2:
397941ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
39801da177e4SLinus Torvalds out3:
39811da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
39821da177e4SLinus Torvalds 	return error;
39831da177e4SLinus Torvalds }
3984853ac43aSMatt Mackall 
3985e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
39865a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
39875a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
39885a6e75f8SKirill A. Shutemov {
39895a6e75f8SKirill A. Shutemov 	int values[] = {
39905a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
39915a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
39925a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
39935a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
39945a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
39955a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
39965a6e75f8SKirill A. Shutemov 	};
39975a6e75f8SKirill A. Shutemov 	int i, count;
39985a6e75f8SKirill A. Shutemov 
39995a6e75f8SKirill A. Shutemov 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
40005a6e75f8SKirill A. Shutemov 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
40015a6e75f8SKirill A. Shutemov 
40025a6e75f8SKirill A. Shutemov 		count += sprintf(buf + count, fmt,
40035a6e75f8SKirill A. Shutemov 				shmem_format_huge(values[i]));
40045a6e75f8SKirill A. Shutemov 	}
40055a6e75f8SKirill A. Shutemov 	buf[count - 1] = '\n';
40065a6e75f8SKirill A. Shutemov 	return count;
40075a6e75f8SKirill A. Shutemov }
40085a6e75f8SKirill A. Shutemov 
40095a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
40105a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
40115a6e75f8SKirill A. Shutemov {
40125a6e75f8SKirill A. Shutemov 	char tmp[16];
40135a6e75f8SKirill A. Shutemov 	int huge;
40145a6e75f8SKirill A. Shutemov 
40155a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
40165a6e75f8SKirill A. Shutemov 		return -EINVAL;
40175a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
40185a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
40195a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
40205a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
40215a6e75f8SKirill A. Shutemov 
40225a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
40235a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
40245a6e75f8SKirill A. Shutemov 		return -EINVAL;
40255a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
40265a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
40275a6e75f8SKirill A. Shutemov 		return -EINVAL;
40285a6e75f8SKirill A. Shutemov 
40295a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
4030435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
40315a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
40325a6e75f8SKirill A. Shutemov 	return count;
40335a6e75f8SKirill A. Shutemov }
40345a6e75f8SKirill A. Shutemov 
40355a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
40365a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
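
/*
 * This knob surfaces (via the transparent_hugepage sysfs group) as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  "force" and "deny"
 * override all mounts for testing, while the other values set the policy
 * of the internal kernel mount used by shared anonymous memory and
 * memfd_create().
 */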
40373b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
4038f3f0e1d2SKirill A. Shutemov 
40393b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
4040f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
4041f3f0e1d2SKirill A. Shutemov {
4042f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
4043f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4044f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
4045f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
4046f3f0e1d2SKirill A. Shutemov 
4047f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
4048f3f0e1d2SKirill A. Shutemov 		return true;
4049f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
4050f3f0e1d2SKirill A. Shutemov 		return false;
4051f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
4052f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
4053f3f0e1d2SKirill A. Shutemov 			return false;
4054f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ALWAYS:
4055f3f0e1d2SKirill A. Shutemov 			return true;
4056f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
4057f3f0e1d2SKirill A. Shutemov 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4058f3f0e1d2SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4059f3f0e1d2SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
4060f3f0e1d2SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
4061f3f0e1d2SKirill A. Shutemov 				return true;
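			/* fall through */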
4062f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
4063f3f0e1d2SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
4064f3f0e1d2SKirill A. Shutemov 			return (vma->vm_flags & VM_HUGEPAGE);
4065f3f0e1d2SKirill A. Shutemov 		default:
4066f3f0e1d2SKirill A. Shutemov 			VM_BUG_ON(1);
4067f3f0e1d2SKirill A. Shutemov 			return false;
4068f3f0e1d2SKirill A. Shutemov 	}
4069f3f0e1d2SKirill A. Shutemov }
40703b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
40715a6e75f8SKirill A. Shutemov 
4072853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4073853ac43aSMatt Mackall 
4074853ac43aSMatt Mackall /*
4075853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4076853ac43aSMatt Mackall  *
4077853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4078853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4079853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
4080853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4081853ac43aSMatt Mackall  */
4082853ac43aSMatt Mackall 
408341ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4084853ac43aSMatt Mackall 	.name		= "tmpfs",
40853c26ff6eSAl Viro 	.mount		= ramfs_mount,
4086853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40872b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4088853ac43aSMatt Mackall };
4089853ac43aSMatt Mackall 
409041ffe5d5SHugh Dickins int __init shmem_init(void)
4091853ac43aSMatt Mackall {
409241ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4093853ac43aSMatt Mackall 
409441ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4095853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4096853ac43aSMatt Mackall 
4097853ac43aSMatt Mackall 	return 0;
4098853ac43aSMatt Mackall }
4099853ac43aSMatt Mackall 
410041ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
4101853ac43aSMatt Mackall {
4102853ac43aSMatt Mackall 	return 0;
4103853ac43aSMatt Mackall }
4104853ac43aSMatt Mackall 
41053f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
41063f96b79aSHugh Dickins {
41073f96b79aSHugh Dickins 	return 0;
41083f96b79aSHugh Dickins }
41093f96b79aSHugh Dickins 
411024513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
411124513264SHugh Dickins {
411224513264SHugh Dickins }
411324513264SHugh Dickins 
4114c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4115c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4116c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4117c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4118c01d5b30SHugh Dickins {
4119c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4120c01d5b30SHugh Dickins }
4121c01d5b30SHugh Dickins #endif
4122c01d5b30SHugh Dickins 
412341ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
412494c1e62dSHugh Dickins {
412541ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
412694c1e62dSHugh Dickins }
412794c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
412894c1e62dSHugh Dickins 
4129853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
41300b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4131454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
41320b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
41330b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4134853ac43aSMatt Mackall 
4135853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4136853ac43aSMatt Mackall 
4137853ac43aSMatt Mackall /* common code */
41381da177e4SLinus Torvalds 
413919938e35SRasmus Villemoes static const struct dentry_operations anon_ops = {
4140118b2302SAl Viro 	.d_dname = simple_dname
41413451538aSAl Viro };
41423451538aSAl Viro 
4143c7277090SEric Paris static struct file *__shmem_file_setup(const char *name, loff_t size,
4144c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
41451da177e4SLinus Torvalds {
41466b4d0b27SAl Viro 	struct file *res;
41471da177e4SLinus Torvalds 	struct inode *inode;
41482c48b9c4SAl Viro 	struct path path;
41493451538aSAl Viro 	struct super_block *sb;
41501da177e4SLinus Torvalds 	struct qstr this;
41511da177e4SLinus Torvalds 
41521da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt))
41536b4d0b27SAl Viro 		return ERR_CAST(shm_mnt);
41541da177e4SLinus Torvalds 
4155285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
41561da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
41571da177e4SLinus Torvalds 
41581da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41591da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41601da177e4SLinus Torvalds 
41616b4d0b27SAl Viro 	res = ERR_PTR(-ENOMEM);
41621da177e4SLinus Torvalds 	this.name = name;
41631da177e4SLinus Torvalds 	this.len = strlen(name);
41641da177e4SLinus Torvalds 	this.hash = 0; /* will go */
41653451538aSAl Viro 	sb = shm_mnt->mnt_sb;
416666ee4b88SKonstantin Khlebnikov 	path.mnt = mntget(shm_mnt);
41673451538aSAl Viro 	path.dentry = d_alloc_pseudo(sb, &this);
41682c48b9c4SAl Viro 	if (!path.dentry)
41691da177e4SLinus Torvalds 		goto put_memory;
41703451538aSAl Viro 	d_set_d_op(path.dentry, &anon_ops);
41711da177e4SLinus Torvalds 
41726b4d0b27SAl Viro 	res = ERR_PTR(-ENOSPC);
41733451538aSAl Viro 	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
41741da177e4SLinus Torvalds 	if (!inode)
417566ee4b88SKonstantin Khlebnikov 		goto put_memory;
41761da177e4SLinus Torvalds 
4177c7277090SEric Paris 	inode->i_flags |= i_flags;
41782c48b9c4SAl Viro 	d_instantiate(path.dentry, inode);
41791da177e4SLinus Torvalds 	inode->i_size = size;
41806d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
418126567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
418226567cdbSAl Viro 	if (IS_ERR(res))
418366ee4b88SKonstantin Khlebnikov 		goto put_path;
41844b42af81SAl Viro 
41856b4d0b27SAl Viro 	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
41864b42af81SAl Viro 		  &shmem_file_operations);
41876b4d0b27SAl Viro 	if (IS_ERR(res))
418866ee4b88SKonstantin Khlebnikov 		goto put_path;
41894b42af81SAl Viro 
41906b4d0b27SAl Viro 	return res;
41911da177e4SLinus Torvalds 
41921da177e4SLinus Torvalds put_memory:
41931da177e4SLinus Torvalds 	shmem_unacct_size(flags, size);
419466ee4b88SKonstantin Khlebnikov put_path:
419566ee4b88SKonstantin Khlebnikov 	path_put(&path);
41966b4d0b27SAl Viro 	return res;
41971da177e4SLinus Torvalds }
4198c7277090SEric Paris 
4199c7277090SEric Paris /**
4200c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4201c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
4202c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
4203e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
4204e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than the inode.
4205c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4206c7277090SEric Paris  * @size: size to be set for the file
4207c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4208c7277090SEric Paris  */
4209c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4210c7277090SEric Paris {
4211c7277090SEric Paris 	return __shmem_file_setup(name, size, flags, S_PRIVATE);
4212c7277090SEric Paris }
4213c7277090SEric Paris 
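/*
 * A minimal usage sketch, not compiled: the caller below is hypothetical
 * ("example_make_kernel_buf" is not part of this file).  The returned file
 * is unlinked, so the final fput() releases the backing store.
 */
#if 0
static struct file *example_make_kernel_buf(void)
{
	struct file *file;

	file = shmem_kernel_file_setup("example_buf", PAGE_SIZE, 0);
	if (IS_ERR(file))
		return file;		/* e.g. ERR_PTR(-ENOMEM) */
	/* ... kernel-internal use; no LSM checks apply to this inode ... */
	return file;			/* caller must fput() when done */
}
#endif
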
4214c7277090SEric Paris /**
4215c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4216c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4217c7277090SEric Paris  * @size: size to be set for the file
4218c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4219c7277090SEric Paris  */
4220c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4221c7277090SEric Paris {
4222c7277090SEric Paris 	return __shmem_file_setup(name, size, flags, 0);
4223c7277090SEric Paris }
4224395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
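/*
 * A minimal usage sketch, not compiled: roughly how ipc/shm.c obtains its
 * backing file.  "example_shm_create" and the literal name are hypothetical;
 * pass VM_NORESERVE in @flags to defer accounting until pages are touched.
 */
#if 0
static struct file *example_shm_create(loff_t size)
{
	/* size is charged up front here because VM_NORESERVE is not set */
	return shmem_file_setup("SYSV_example", size, 0);
}
#endif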
42251da177e4SLinus Torvalds 
422646711810SRandy Dunlap /**
42271da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
42281da177e4SLinus Torvalds  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
42291da177e4SLinus Torvalds  */
42301da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
42311da177e4SLinus Torvalds {
42321da177e4SLinus Torvalds 	struct file *file;
42331da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
42341da177e4SLinus Torvalds 
423566fc1303SHugh Dickins 	/*
423666fc1303SHugh Dickins 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
423766fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
423866fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
423966fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
424066fc1303SHugh Dickins 	 */
424166fc1303SHugh Dickins 	file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
42421da177e4SLinus Torvalds 	if (IS_ERR(file))
42431da177e4SLinus Torvalds 		return PTR_ERR(file);
42441da177e4SLinus Torvalds 
42451da177e4SLinus Torvalds 	if (vma->vm_file)
42461da177e4SLinus Torvalds 		fput(vma->vm_file);
42471da177e4SLinus Torvalds 	vma->vm_file = file;
42481da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4249f3f0e1d2SKirill A. Shutemov 
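	/*
	 * Hand the mapping to khugepaged only if it spans at least one full
	 * PMD-sized huge page: round vm_start up and vm_end down to
	 * HPAGE_PMD_SIZE boundaries and check that the range is non-empty.
	 */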
4250e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4251f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4252f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4253f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4254f3f0e1d2SKirill A. Shutemov 	}
4255f3f0e1d2SKirill A. Shutemov 
42561da177e4SLinus Torvalds 	return 0;
42571da177e4SLinus Torvalds }
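
/*
 * A minimal sketch of the calling pattern, not compiled: core mmap code
 * invokes shmem_zero_setup() for MAP_SHARED | MAP_ANONYMOUS mappings, so
 * shared "anonymous" memory is really an unlinked tmpfs file.  The wrapper
 * below is hypothetical ("example_back_shared_anon" is not part of this
 * file).
 */
#if 0
static int example_back_shared_anon(struct vm_area_struct *vma)
{
	/* gives the vma an unlinked tmpfs file and shmem_vm_ops */
	return shmem_zero_setup(vma);
}
#endif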
4258d9d90e5eSHugh Dickins 
4259d9d90e5eSHugh Dickins /**
4260d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4261d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4262d9d90e5eSHugh Dickins  * @index:	the page index
4263d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4264d9d90e5eSHugh Dickins  *
4265d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4266d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4267d9d90e5eSHugh Dickins  * But read_cache_page_gfp() itself uses the ->readpage() method, which does
4268d9d90e5eSHugh Dickins  * not suit tmpfs: tmpfs may have pages in swapcache and must find those for
4269d9d90e5eSHugh Dickins  * itself; the drivers/gpu/drm i915 and ttm drivers rely on that behaviour.
4270d9d90e5eSHugh Dickins  *
427168da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
427268da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4273d9d90e5eSHugh Dickins  */
4274d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4275d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4276d9d90e5eSHugh Dickins {
427768da9f05SHugh Dickins #ifdef CONFIG_SHMEM
427868da9f05SHugh Dickins 	struct inode *inode = mapping->host;
42799276aad6SHugh Dickins 	struct page *page;
428068da9f05SHugh Dickins 	int error;
428168da9f05SHugh Dickins 
428268da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
42839e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4284cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
428568da9f05SHugh Dickins 	if (error)
428668da9f05SHugh Dickins 		page = ERR_PTR(error);
428768da9f05SHugh Dickins 	else
428868da9f05SHugh Dickins 		unlock_page(page);
428968da9f05SHugh Dickins 	return page;
429068da9f05SHugh Dickins #else
429168da9f05SHugh Dickins 	/*
429268da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
429368da9f05SHugh Dickins 	 */
4294d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
429568da9f05SHugh Dickins #endif
4296d9d90e5eSHugh Dickins }
4297d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
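
/*
 * A minimal usage sketch, not compiled, in the style of the i915 caller
 * noted above: mixing __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp
 * mask lets a large read-in fail gracefully rather than OOM the machine.
 * "example_read_page" is hypothetical, not part of this file.
 */
#if 0
static struct page *example_read_page(struct address_space *mapping,
				      pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	/* returns the page unlocked with a reference, or ERR_PTR on error */
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif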
4298