xref: /openbmc/linux/mm/shmem.c (revision b56a2d8af9147a4efe4011b60d93779c0461ca97)
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
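
/*
 * Editorial note (not part of the original source): VM_ACCT() converts a
 * byte count into whole pages for the overcommit accounting, rounding up.
 * With 4KiB pages, for example, VM_ACCT(5000) = PAGE_ALIGN(5000) >> 12 = 2,
 * so a 5000-byte object is charged as two pages of commit.
 */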

/* Pretend that each entry is of this size in a directory's i_size */
#define BOGO_DIRENT_SIZE 20
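
/*
 * Editorial note (an assumption from elsewhere in this file, not shown in
 * this excerpt): tmpfs directory entries occupy no real storage, so stat()
 * reports a synthetic directory size of BOGO_DIRENT_SIZE per entry, e.g. a
 * freshly created directory starts at 2 * BOGO_DIRENT_SIZE for "." and "..".
 */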
931da177e4SLinus Torvalds 
9469f07ec9SHugh Dickins /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
9569f07ec9SHugh Dickins #define SHORT_SYMLINK_LEN 128
9669f07ec9SHugh Dickins 
971aac1400SHugh Dickins /*
98f00cdc6dSHugh Dickins  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99f00cdc6dSHugh Dickins  * inode->i_private (with i_mutex making sure that it has only one user at
100f00cdc6dSHugh Dickins  * a time): we would prefer not to enlarge the shmem inode just for that.
1011aac1400SHugh Dickins  */
1021aac1400SHugh Dickins struct shmem_falloc {
1038e205f77SHugh Dickins 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
1041aac1400SHugh Dickins 	pgoff_t start;		/* start of range currently being fallocated */
1051aac1400SHugh Dickins 	pgoff_t next;		/* the next page offset to be fallocated */
1061aac1400SHugh Dickins 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
1071aac1400SHugh Dickins 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
1081aac1400SHugh Dickins };
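
/*
 * Editorial sketch (an assumption inferred from the comment above; the
 * relevant code is outside this excerpt): shmem_fallocate() is expected to
 * publish its progress roughly like so, with i_mutex serializing writers
 * of i_private:
 *
 *	struct shmem_falloc shmem_falloc;
 *	shmem_falloc.start = start;
 *	shmem_falloc.next  = start;
 *	inode->i_private = &shmem_falloc;
 *	... allocate or punch pages, advancing next/nr_falloced ...
 *	inode->i_private = NULL;
 *
 * so that shmem_fault() and shmem_writepage() can detect an in-flight
 * preallocation or hole-punch by inspecting inode->i_private.
 */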

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif
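
/*
 * Editorial example (not part of the original source): with 4KiB pages and
 * 8GiB of RAM (2097152 pages), an unconfigured tmpfs mount defaults to
 * max_blocks = 1048576 (a 4GiB filesystem) and, absent highmem, max_inodes
 * = min(2097152 - 0, 1048576) = 1048576.
 */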

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped,
 * so mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped).
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
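
/*
 * Editorial example (not part of the original source): if info->alloced is
 * 8 while 1 page is swapped out and only 5 remain in the page cache, then
 * freed = 8 - 1 - 5 = 2: two undirtied hole pages were reclaimed behind our
 * back, so two pages of i_blocks and block accounting are returned.
 */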

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
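
/*
 * Editorial usage note (not part of the original source): the per-mount
 * values are chosen with the huge= mount option, e.g.
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *
 * and the special values, like the others, are written to the sysfs knob
 * named above:
 *
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */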

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}
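
/*
 * Editorial note (not part of the original source): the shrinker above runs
 * in two phases to keep shrinklist_lock hold times short: candidates are
 * moved off sbinfo->shrinklist under the lock, then the split_huge_page()
 * calls happen with the lock dropped, and survivors are spliced back at the
 * end.  The two wrappers below expose it as the superblock's shrinker
 * scan/count callbacks.
 */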

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = 1UL << compound_order(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page + i);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__inc_node_page_state(page, NR_SHMEM_THPS);
		}
		mapping->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		page->mapping = NULL;
		page_ref_sub(page, nr);
		return xas_error(&xas);
	}

	return 0;
}
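
/*
 * Editorial note (not part of the original source): the do/while above is
 * the usual XArray idiom: the insertion is attempted under xa_lock, and if
 * it failed for want of memory, xas_nomem() allocates outside the lock
 * (honouring gfp) and the whole operation is retried.
 */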

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}
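
/*
 * Editorial example (not part of the original source): for a vma mapping
 * only pages 50..99 of a 100-page object, info->swapped alone cannot give
 * the answer, so the code falls back to shmem_partial_swap_usage() over
 * the page offsets spanned by the vma, counting swap entries one by one.
 */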

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due to 'start' falling
				 * in the middle of a THP: no need to look at
				 * these pages again on the !pvec.nr restart.
				 */
931800d8c63SKirill A. Shutemov 				if (index != round_down(end, HPAGE_PMD_NR))
932800d8c63SKirill A. Shutemov 					start++;
933800d8c63SKirill A. Shutemov 				continue;
934800d8c63SKirill A. Shutemov 			} else if (PageTransHuge(page)) {
935800d8c63SKirill A. Shutemov 				if (index == round_down(end, HPAGE_PMD_NR)) {
936800d8c63SKirill A. Shutemov 					/*
937800d8c63SKirill A. Shutemov 					 * Range ends in the middle of THP:
938800d8c63SKirill A. Shutemov 					 * zero out the page
939800d8c63SKirill A. Shutemov 					 */
940800d8c63SKirill A. Shutemov 					clear_highpage(page);
941800d8c63SKirill A. Shutemov 					unlock_page(page);
942800d8c63SKirill A. Shutemov 					continue;
943800d8c63SKirill A. Shutemov 				}
944800d8c63SKirill A. Shutemov 				index += HPAGE_PMD_NR - 1;
945800d8c63SKirill A. Shutemov 				i += HPAGE_PMD_NR - 1;
946800d8c63SKirill A. Shutemov 			}
947800d8c63SKirill A. Shutemov 
9481635f6a7SHugh Dickins 			if (!unfalloc || !PageUptodate(page)) {
949800d8c63SKirill A. Shutemov 				VM_BUG_ON_PAGE(PageTail(page), page);
950800d8c63SKirill A. Shutemov 				if (page_mapping(page) == mapping) {
951309381feSSasha Levin 					VM_BUG_ON_PAGE(PageWriteback(page), page);
952bda97eabSHugh Dickins 					truncate_inode_page(mapping, page);
953b1a36650SHugh Dickins 				} else {
954b1a36650SHugh Dickins 					/* Page was replaced by swap: retry */
955b1a36650SHugh Dickins 					unlock_page(page);
956b1a36650SHugh Dickins 					index--;
957b1a36650SHugh Dickins 					break;
9587a5d0fbbSHugh Dickins 				}
9591635f6a7SHugh Dickins 			}
960bda97eabSHugh Dickins 			unlock_page(page);
961bda97eabSHugh Dickins 		}
9620cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
96324513264SHugh Dickins 		pagevec_release(&pvec);
964bda97eabSHugh Dickins 		index++;
965bda97eabSHugh Dickins 	}
96694c1e62dSHugh Dickins 
9674595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
9687a5d0fbbSHugh Dickins 	info->swapped -= nr_swaps_freed;
9691da177e4SLinus Torvalds 	shmem_recalc_inode(inode);
9704595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
9711635f6a7SHugh Dickins }
9721da177e4SLinus Torvalds 
9731635f6a7SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
9741635f6a7SHugh Dickins {
9751635f6a7SHugh Dickins 	shmem_undo_range(inode, lstart, lend, false);
976078cd827SDeepa Dinamani 	inode->i_ctime = inode->i_mtime = current_time(inode);
9771da177e4SLinus Torvalds }
97894c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
9791da177e4SLinus Torvalds 
980a528d35eSDavid Howells static int shmem_getattr(const struct path *path, struct kstat *stat,
981a528d35eSDavid Howells 			 u32 request_mask, unsigned int query_flags)
98244a30220SYu Zhao {
983a528d35eSDavid Howells 	struct inode *inode = path->dentry->d_inode;
98444a30220SYu Zhao 	struct shmem_inode_info *info = SHMEM_I(inode);
98589fdcd26SYang Shi 	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
98644a30220SYu Zhao 
987d0424c42SHugh Dickins 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
9884595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
98944a30220SYu Zhao 		shmem_recalc_inode(inode);
9904595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
991d0424c42SHugh Dickins 	}
99244a30220SYu Zhao 	generic_fillattr(inode, stat);
99389fdcd26SYang Shi 
99489fdcd26SYang Shi 	if (is_huge_enabled(sb_info))
99589fdcd26SYang Shi 		stat->blksize = HPAGE_PMD_SIZE;
99689fdcd26SYang Shi 
99744a30220SYu Zhao 	return 0;
99844a30220SYu Zhao }
99944a30220SYu Zhao 
100094c1e62dSHugh Dickins static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
10011da177e4SLinus Torvalds {
100275c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
100340e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1004779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10051da177e4SLinus Torvalds 	int error;
10061da177e4SLinus Torvalds 
100731051c85SJan Kara 	error = setattr_prepare(dentry, attr);
1008db78b877SChristoph Hellwig 	if (error)
1009db78b877SChristoph Hellwig 		return error;
1010db78b877SChristoph Hellwig 
101194c1e62dSHugh Dickins 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
101294c1e62dSHugh Dickins 		loff_t oldsize = inode->i_size;
101394c1e62dSHugh Dickins 		loff_t newsize = attr->ia_size;
10143889e6e7Snpiggin@suse.de 
101540e041a2SDavid Herrmann 		/* protected by i_mutex */
101640e041a2SDavid Herrmann 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
101740e041a2SDavid Herrmann 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
101840e041a2SDavid Herrmann 			return -EPERM;
101940e041a2SDavid Herrmann 
102094c1e62dSHugh Dickins 		if (newsize != oldsize) {
102177142517SKonstantin Khlebnikov 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
102277142517SKonstantin Khlebnikov 					oldsize, newsize);
102377142517SKonstantin Khlebnikov 			if (error)
102477142517SKonstantin Khlebnikov 				return error;
102594c1e62dSHugh Dickins 			i_size_write(inode, newsize);
1026078cd827SDeepa Dinamani 			inode->i_ctime = inode->i_mtime = current_time(inode);
102794c1e62dSHugh Dickins 		}
1028afa2db2fSJosef Bacik 		if (newsize <= oldsize) {
102994c1e62dSHugh Dickins 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1030d0424c42SHugh Dickins 			if (oldsize > holebegin)
1031d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1032d0424c42SHugh Dickins 							holebegin, 0, 1);
1033d0424c42SHugh Dickins 			if (info->alloced)
1034d0424c42SHugh Dickins 				shmem_truncate_range(inode,
1035d0424c42SHugh Dickins 							newsize, (loff_t)-1);
103694c1e62dSHugh Dickins 			/* unmap again to remove racily COWed private pages */
1037d0424c42SHugh Dickins 			if (oldsize > holebegin)
1038d0424c42SHugh Dickins 				unmap_mapping_range(inode->i_mapping,
1039d0424c42SHugh Dickins 							holebegin, 0, 1);
1040779750d2SKirill A. Shutemov 
1041779750d2SKirill A. Shutemov 			/*
1042779750d2SKirill A. Shutemov 			 * Part of the huge page can be beyond i_size: subject
1043779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1044779750d2SKirill A. Shutemov 			 */
1045779750d2SKirill A. Shutemov 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1046779750d2SKirill A. Shutemov 				spin_lock(&sbinfo->shrinklist_lock);
1047d041353dSCong Wang 				/*
1048d041353dSCong Wang 				 * _careful to defend against unlocked access to
1049d041353dSCong Wang 				 * ->shrink_list in shmem_unused_huge_shrink()
1050d041353dSCong Wang 				 */
1051d041353dSCong Wang 				if (list_empty_careful(&info->shrinklist)) {
1052779750d2SKirill A. Shutemov 					list_add_tail(&info->shrinklist,
1053779750d2SKirill A. Shutemov 							&sbinfo->shrinklist);
1054779750d2SKirill A. Shutemov 					sbinfo->shrinklist_len++;
1055779750d2SKirill A. Shutemov 				}
1056779750d2SKirill A. Shutemov 				spin_unlock(&sbinfo->shrinklist_lock);
1057779750d2SKirill A. Shutemov 			}
105894c1e62dSHugh Dickins 		}
10591da177e4SLinus Torvalds 	}
10601da177e4SLinus Torvalds 
10616a1a90adSChristoph Hellwig 	setattr_copy(inode, attr);
1062db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1063feda821eSChristoph Hellwig 		error = posix_acl_chmod(inode, inode->i_mode);
10641da177e4SLinus Torvalds 	return error;
10651da177e4SLinus Torvalds }
10661da177e4SLinus Torvalds 
10671f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
10681da177e4SLinus Torvalds {
10691da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1070779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10711da177e4SLinus Torvalds 
10723889e6e7Snpiggin@suse.de 	if (inode->i_mapping->a_ops == &shmem_aops) {
10731da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
10741da177e4SLinus Torvalds 		inode->i_size = 0;
10753889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1076779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1077779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1078779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1079779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1080779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1081779750d2SKirill A. Shutemov 			}
1082779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1083779750d2SKirill A. Shutemov 		}
10841da177e4SLinus Torvalds 		if (!list_empty(&info->swaplist)) {
1085cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
10861da177e4SLinus Torvalds 			list_del_init(&info->swaplist);
1087cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
10881da177e4SLinus Torvalds 		}
10893ed47db3SAl Viro 	}
1090b09e0fa4SEric Paris 
109138f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
10920f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
10935b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1094dbd5768fSJan Kara 	clear_inode(inode);
10951da177e4SLinus Torvalds }
10961da177e4SLinus Torvalds 
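/*
 * swap_info[] is defined in mm/swapfile.c; it is consulted below to
 * check the frontswap state of a swap entry's device.
 */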
1097*b56a2d8aSVineeth Remanan Pillai extern struct swap_info_struct *swap_info[];
1098*b56a2d8aSVineeth Remanan Pillai 
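/*
 * Scan the page cache of @mapping from @start for up to @nr_entries
 * swap (value) entries, storing the entries in @entries and their
 * offsets in @indices.  If @frontswap is true, only entries currently
 * resident in frontswap are reported.  Returns the number found.
 */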
1099*b56a2d8aSVineeth Remanan Pillai static int shmem_find_swap_entries(struct address_space *mapping,
1100*b56a2d8aSVineeth Remanan Pillai 				   pgoff_t start, unsigned int nr_entries,
1101*b56a2d8aSVineeth Remanan Pillai 				   struct page **entries, pgoff_t *indices,
1102*b56a2d8aSVineeth Remanan Pillai 				   bool frontswap)
1103478922e2SMatthew Wilcox {
1104*b56a2d8aSVineeth Remanan Pillai 	XA_STATE(xas, &mapping->i_pages, start);
1105*b56a2d8aSVineeth Remanan Pillai 	struct page *page;
1106*b56a2d8aSVineeth Remanan Pillai 	unsigned int ret = 0;
1107*b56a2d8aSVineeth Remanan Pillai 
1108*b56a2d8aSVineeth Remanan Pillai 	if (!nr_entries)
1109*b56a2d8aSVineeth Remanan Pillai 		return 0;
1110478922e2SMatthew Wilcox 
1111478922e2SMatthew Wilcox 	rcu_read_lock();
1112*b56a2d8aSVineeth Remanan Pillai 	xas_for_each(&xas, page, ULONG_MAX) {
1113*b56a2d8aSVineeth Remanan Pillai 		if (xas_retry(&xas, page))
11145b9c98f3SMike Kravetz 			continue;
1115*b56a2d8aSVineeth Remanan Pillai 
1116*b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1117478922e2SMatthew Wilcox 			continue;
1118*b56a2d8aSVineeth Remanan Pillai 
1119*b56a2d8aSVineeth Remanan Pillai 		if (frontswap) {
1120*b56a2d8aSVineeth Remanan Pillai 			swp_entry_t entry = radix_to_swp_entry(page);
1121*b56a2d8aSVineeth Remanan Pillai 
1122*b56a2d8aSVineeth Remanan Pillai 			if (!frontswap_test(swap_info[swp_type(entry)],
1123*b56a2d8aSVineeth Remanan Pillai 					    swp_offset(entry)))
1124*b56a2d8aSVineeth Remanan Pillai 				continue;
1125*b56a2d8aSVineeth Remanan Pillai 		}
1126*b56a2d8aSVineeth Remanan Pillai 
1127*b56a2d8aSVineeth Remanan Pillai 		indices[ret] = xas.xa_index;
1128*b56a2d8aSVineeth Remanan Pillai 		entries[ret] = page;
1129*b56a2d8aSVineeth Remanan Pillai 
1130*b56a2d8aSVineeth Remanan Pillai 		if (need_resched()) {
1131e21a2955SMatthew Wilcox 			xas_pause(&xas);
1132478922e2SMatthew Wilcox 			cond_resched_rcu();
1133478922e2SMatthew Wilcox 		}
1134*b56a2d8aSVineeth Remanan Pillai 		if (++ret == nr_entries)
1135*b56a2d8aSVineeth Remanan Pillai 			break;
1136*b56a2d8aSVineeth Remanan Pillai 	}
1137478922e2SMatthew Wilcox 	rcu_read_unlock();
1138e21a2955SMatthew Wilcox 
1139*b56a2d8aSVineeth Remanan Pillai 	return ret;
1140*b56a2d8aSVineeth Remanan Pillai }
1141*b56a2d8aSVineeth Remanan Pillai 
1142*b56a2d8aSVineeth Remanan Pillai /*
1143*b56a2d8aSVineeth Remanan Pillai  * Move the swapped pages for an inode to page cache. Returns the count
1144*b56a2d8aSVineeth Remanan Pillai  * of pages swapped in, or the error code in case of failure.
1145*b56a2d8aSVineeth Remanan Pillai  */
1146*b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1147*b56a2d8aSVineeth Remanan Pillai 				    pgoff_t *indices)
1148*b56a2d8aSVineeth Remanan Pillai {
1149*b56a2d8aSVineeth Remanan Pillai 	int i = 0;
1150*b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
1151*b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1152*b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1153*b56a2d8aSVineeth Remanan Pillai 
1154*b56a2d8aSVineeth Remanan Pillai 	for (i = 0; i < pvec.nr; i++) {
1155*b56a2d8aSVineeth Remanan Pillai 		struct page *page = pvec.pages[i];
1156*b56a2d8aSVineeth Remanan Pillai 
1157*b56a2d8aSVineeth Remanan Pillai 		if (!xa_is_value(page))
1158*b56a2d8aSVineeth Remanan Pillai 			continue;
1159*b56a2d8aSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, indices[i],
1160*b56a2d8aSVineeth Remanan Pillai 					  &page, SGP_CACHE,
1161*b56a2d8aSVineeth Remanan Pillai 					  mapping_gfp_mask(mapping),
1162*b56a2d8aSVineeth Remanan Pillai 					  NULL, NULL);
1163*b56a2d8aSVineeth Remanan Pillai 		if (error == 0) {
1164*b56a2d8aSVineeth Remanan Pillai 			unlock_page(page);
1165*b56a2d8aSVineeth Remanan Pillai 			put_page(page);
1166*b56a2d8aSVineeth Remanan Pillai 			ret++;
1167*b56a2d8aSVineeth Remanan Pillai 		}
1168*b56a2d8aSVineeth Remanan Pillai 		if (error == -ENOMEM)
1169*b56a2d8aSVineeth Remanan Pillai 			break;
1170*b56a2d8aSVineeth Remanan Pillai 		error = 0;
1171*b56a2d8aSVineeth Remanan Pillai 	}
1172*b56a2d8aSVineeth Remanan Pillai 	return error ? error : ret;
1173478922e2SMatthew Wilcox }
1174478922e2SMatthew Wilcox 
117546f65ec1SHugh Dickins /*
117646f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
117746f65ec1SHugh Dickins  */
1178*b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1179*b56a2d8aSVineeth Remanan Pillai 			     bool frontswap, unsigned long *fs_pages_to_unuse)
11801da177e4SLinus Torvalds {
1181*b56a2d8aSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1182*b56a2d8aSVineeth Remanan Pillai 	pgoff_t start = 0;
1183*b56a2d8aSVineeth Remanan Pillai 	struct pagevec pvec;
1184*b56a2d8aSVineeth Remanan Pillai 	pgoff_t indices[PAGEVEC_SIZE];
1185*b56a2d8aSVineeth Remanan Pillai 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1186*b56a2d8aSVineeth Remanan Pillai 	int ret = 0;
11871da177e4SLinus Torvalds 
1188*b56a2d8aSVineeth Remanan Pillai 	pagevec_init(&pvec);
1189*b56a2d8aSVineeth Remanan Pillai 	do {
1190*b56a2d8aSVineeth Remanan Pillai 		unsigned int nr_entries = PAGEVEC_SIZE;
11912e0e26c7SHugh Dickins 
1192*b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1193*b56a2d8aSVineeth Remanan Pillai 			nr_entries = *fs_pages_to_unuse;
11942e0e26c7SHugh Dickins 
1195*b56a2d8aSVineeth Remanan Pillai 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1196*b56a2d8aSVineeth Remanan Pillai 						  pvec.pages, indices,
1197*b56a2d8aSVineeth Remanan Pillai 						  frontswap);
1198*b56a2d8aSVineeth Remanan Pillai 		if (pvec.nr == 0) {
1199*b56a2d8aSVineeth Remanan Pillai 			ret = 0;
1200778dd893SHugh Dickins 			break;
1201*b56a2d8aSVineeth Remanan Pillai 		}
1202*b56a2d8aSVineeth Remanan Pillai 
1203*b56a2d8aSVineeth Remanan Pillai 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1204*b56a2d8aSVineeth Remanan Pillai 		if (ret < 0)
1205*b56a2d8aSVineeth Remanan Pillai 			break;
1206*b56a2d8aSVineeth Remanan Pillai 
1207*b56a2d8aSVineeth Remanan Pillai 		if (frontswap_partial) {
1208*b56a2d8aSVineeth Remanan Pillai 			*fs_pages_to_unuse -= ret;
1209*b56a2d8aSVineeth Remanan Pillai 			if (*fs_pages_to_unuse == 0) {
1210*b56a2d8aSVineeth Remanan Pillai 				ret = FRONTSWAP_PAGES_UNUSED;
1211*b56a2d8aSVineeth Remanan Pillai 				break;
1212*b56a2d8aSVineeth Remanan Pillai 			}
1213*b56a2d8aSVineeth Remanan Pillai 		}
1214*b56a2d8aSVineeth Remanan Pillai 
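		/*
		 * Resume the scan at the last index of this batch: entries
		 * successfully swapped in are no longer value entries, so
		 * they will be skipped when that index is scanned again.
		 */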
1215*b56a2d8aSVineeth Remanan Pillai 		start = indices[pvec.nr - 1];
1216*b56a2d8aSVineeth Remanan Pillai 	} while (true);
1217*b56a2d8aSVineeth Remanan Pillai 
1218*b56a2d8aSVineeth Remanan Pillai 	return ret;
1219*b56a2d8aSVineeth Remanan Pillai }
1220*b56a2d8aSVineeth Remanan Pillai 
1221*b56a2d8aSVineeth Remanan Pillai /*
1222*b56a2d8aSVineeth Remanan Pillai  * Read all the shared memory data that resides in the swap
1223*b56a2d8aSVineeth Remanan Pillai  * device 'type' back into memory, so the swap device can be
1224*b56a2d8aSVineeth Remanan Pillai  * unused.
1225*b56a2d8aSVineeth Remanan Pillai  */
1226*b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
1227*b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
1228*b56a2d8aSVineeth Remanan Pillai {
1229*b56a2d8aSVineeth Remanan Pillai 	struct shmem_inode_info *info, *next;
1230*b56a2d8aSVineeth Remanan Pillai 	struct inode *inode;
1231*b56a2d8aSVineeth Remanan Pillai 	struct inode *prev_inode = NULL;
1232*b56a2d8aSVineeth Remanan Pillai 	int error = 0;
1233*b56a2d8aSVineeth Remanan Pillai 
1234*b56a2d8aSVineeth Remanan Pillai 	if (list_empty(&shmem_swaplist))
1235*b56a2d8aSVineeth Remanan Pillai 		return 0;
1236*b56a2d8aSVineeth Remanan Pillai 
1237*b56a2d8aSVineeth Remanan Pillai 	mutex_lock(&shmem_swaplist_mutex);
1238*b56a2d8aSVineeth Remanan Pillai 
1239*b56a2d8aSVineeth Remanan Pillai 	/*
1240*b56a2d8aSVineeth Remanan Pillai 	 * The extra refcount on the inode is necessary to safely dereference
1241*b56a2d8aSVineeth Remanan Pillai 	 * the next list entry after re-acquiring the lock. New shmem inodes
1242*b56a2d8aSVineeth Remanan Pillai 	 * with swap get added to the list and we will scan them all.
1243*b56a2d8aSVineeth Remanan Pillai 	 */
1244*b56a2d8aSVineeth Remanan Pillai 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1245*b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped) {
1246*b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1247*b56a2d8aSVineeth Remanan Pillai 			continue;
1248*b56a2d8aSVineeth Remanan Pillai 		}
1249*b56a2d8aSVineeth Remanan Pillai 
1250*b56a2d8aSVineeth Remanan Pillai 		inode = igrab(&info->vfs_inode);
1251*b56a2d8aSVineeth Remanan Pillai 		if (!inode)
1252*b56a2d8aSVineeth Remanan Pillai 			continue;
1253*b56a2d8aSVineeth Remanan Pillai 
1254*b56a2d8aSVineeth Remanan Pillai 		mutex_unlock(&shmem_swaplist_mutex);
1255*b56a2d8aSVineeth Remanan Pillai 		if (prev_inode)
1256*b56a2d8aSVineeth Remanan Pillai 			iput(prev_inode);
1257*b56a2d8aSVineeth Remanan Pillai 		prev_inode = inode;
1258*b56a2d8aSVineeth Remanan Pillai 
1259*b56a2d8aSVineeth Remanan Pillai 		error = shmem_unuse_inode(inode, type, frontswap,
1260*b56a2d8aSVineeth Remanan Pillai 					  fs_pages_to_unuse);
1261*b56a2d8aSVineeth Remanan Pillai 		cond_resched();
1262*b56a2d8aSVineeth Remanan Pillai 
1263*b56a2d8aSVineeth Remanan Pillai 		mutex_lock(&shmem_swaplist_mutex);
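		/*
		 * The list may have changed while the mutex was dropped;
		 * the reference taken above keeps the inode (and so info)
		 * alive, so it is safe to recompute next from it.
		 */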
1264*b56a2d8aSVineeth Remanan Pillai 		next = list_next_entry(info, swaplist);
1265*b56a2d8aSVineeth Remanan Pillai 		if (!info->swapped)
1266*b56a2d8aSVineeth Remanan Pillai 			list_del_init(&info->swaplist);
1267*b56a2d8aSVineeth Remanan Pillai 		if (error)
1268*b56a2d8aSVineeth Remanan Pillai 			break;
12691da177e4SLinus Torvalds 	}
1270cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1271778dd893SHugh Dickins 
1272*b56a2d8aSVineeth Remanan Pillai 	if (prev_inode)
1273*b56a2d8aSVineeth Remanan Pillai 		iput(prev_inode);
1274*b56a2d8aSVineeth Remanan Pillai 
1275778dd893SHugh Dickins 	return error;
12761da177e4SLinus Torvalds }
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds /*
12791da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
12801da177e4SLinus Torvalds  */
12811da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
12821da177e4SLinus Torvalds {
12831da177e4SLinus Torvalds 	struct shmem_inode_info *info;
12841da177e4SLinus Torvalds 	struct address_space *mapping;
12851da177e4SLinus Torvalds 	struct inode *inode;
12866922c0c7SHugh Dickins 	swp_entry_t swap;
12876922c0c7SHugh Dickins 	pgoff_t index;
12881da177e4SLinus Torvalds 
1289800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
12901da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
12911da177e4SLinus Torvalds 	mapping = page->mapping;
12921da177e4SLinus Torvalds 	index = page->index;
12931da177e4SLinus Torvalds 	inode = mapping->host;
12941da177e4SLinus Torvalds 	info = SHMEM_I(inode);
12951da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
12961da177e4SLinus Torvalds 		goto redirty;
1297d9fe526aSHugh Dickins 	if (!total_swap_pages)
12981da177e4SLinus Torvalds 		goto redirty;
12991da177e4SLinus Torvalds 
1300d9fe526aSHugh Dickins 	/*
130197b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
130297b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
130397b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
130497b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
130597b713baSChristoph Hellwig 	 * threads or sync.
1306d9fe526aSHugh Dickins 	 */
130748f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
130848f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
130948f170fbSHugh Dickins 		goto redirty;
131048f170fbSHugh Dickins 	}
13111635f6a7SHugh Dickins 
13121635f6a7SHugh Dickins 	/*
13131635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13141635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13151635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13161aac1400SHugh Dickins 	 *
13171aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13181aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13191aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13201aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13211aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13221635f6a7SHugh Dickins 	 */
13231635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13241aac1400SHugh Dickins 		if (inode->i_private) {
13251aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13261aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13271aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13281aac1400SHugh Dickins 			if (shmem_falloc &&
13298e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13301aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13311aac1400SHugh Dickins 			    index < shmem_falloc->next)
13321aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13331aac1400SHugh Dickins 			else
13341aac1400SHugh Dickins 				shmem_falloc = NULL;
13351aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
13361aac1400SHugh Dickins 			if (shmem_falloc)
13371aac1400SHugh Dickins 				goto redirty;
13381aac1400SHugh Dickins 		}
13391635f6a7SHugh Dickins 		clear_highpage(page);
13401635f6a7SHugh Dickins 		flush_dcache_page(page);
13411635f6a7SHugh Dickins 		SetPageUptodate(page);
13421635f6a7SHugh Dickins 	}
13431635f6a7SHugh Dickins 
134438d8b4e6SHuang Ying 	swap = get_swap_page(page);
134548f170fbSHugh Dickins 	if (!swap.val)
134648f170fbSHugh Dickins 		goto redirty;
1347d9fe526aSHugh Dickins 
1348b1dea800SHugh Dickins 	/*
1349b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
13506922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
13516922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1352b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
13536922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
13546922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1355b1dea800SHugh Dickins 	 */
1356b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
135705bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
1358*b56a2d8aSVineeth Remanan Pillai 		list_add(&info->swaplist, &shmem_swaplist);
1359b1dea800SHugh Dickins 
136048f170fbSHugh Dickins 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
13614595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1362267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1363267a4c76SHugh Dickins 		info->swapped++;
13644595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1365267a4c76SHugh Dickins 
1366aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
13676922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
13686922c0c7SHugh Dickins 
13696922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1370d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
13719fab5619SHugh Dickins 		swap_writepage(page, wbc);
13721da177e4SLinus Torvalds 		return 0;
13731da177e4SLinus Torvalds 	}
13741da177e4SLinus Torvalds 
13756922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
137675f6d6d2SMinchan Kim 	put_swap_page(page, swap);
13771da177e4SLinus Torvalds redirty:
13781da177e4SLinus Torvalds 	set_page_dirty(page);
1379d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1380d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1381d9fe526aSHugh Dickins 	unlock_page(page);
1382d9fe526aSHugh Dickins 	return 0;
13831da177e4SLinus Torvalds }
13841da177e4SLinus Torvalds 
138575edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
138671fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1387680d794bSakpm@linux-foundation.org {
1388680d794bSakpm@linux-foundation.org 	char buffer[64];
1389680d794bSakpm@linux-foundation.org 
139071fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1391095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1392095f1fc4SLee Schermerhorn 
1393a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1394095f1fc4SLee Schermerhorn 
1395095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1396680d794bSakpm@linux-foundation.org }
139771fe804bSLee Schermerhorn 
139871fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
139971fe804bSLee Schermerhorn {
140071fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
140171fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
140271fe804bSLee Schermerhorn 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
140371fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
140471fe804bSLee Schermerhorn 		mpol_get(mpol);
140571fe804bSLee Schermerhorn 		spin_unlock(&sbinfo->stat_lock);
140671fe804bSLee Schermerhorn 	}
140771fe804bSLee Schermerhorn 	return mpol;
140871fe804bSLee Schermerhorn }
140975edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
141075edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
141175edd345SHugh Dickins {
141275edd345SHugh Dickins }
141375edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
141475edd345SHugh Dickins {
141575edd345SHugh Dickins 	return NULL;
141675edd345SHugh Dickins }
141775edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
141875edd345SHugh Dickins #ifndef CONFIG_NUMA
141975edd345SHugh Dickins #define vm_policy vm_private_data
142075edd345SHugh Dickins #endif
1421680d794bSakpm@linux-foundation.org 
1422800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1423800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1424800d8c63SKirill A. Shutemov {
1425800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14262c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1427800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1428800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1429800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1430800d8c63SKirill A. Shutemov }
1431800d8c63SKirill A. Shutemov 
1432800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1433800d8c63SKirill A. Shutemov {
1434800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1435800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1436800d8c63SKirill A. Shutemov }
1437800d8c63SKirill A. Shutemov 
143841ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
143941ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
14401da177e4SLinus Torvalds {
14411da177e4SLinus Torvalds 	struct vm_area_struct pvma;
144218a2f371SMel Gorman 	struct page *page;
1443e9e9b7ecSMinchan Kim 	struct vm_fault vmf;
14441da177e4SLinus Torvalds 
1445800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1446e9e9b7ecSMinchan Kim 	vmf.vma = &pvma;
1447e9e9b7ecSMinchan Kim 	vmf.address = 0;
1448e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1449800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
145018a2f371SMel Gorman 
1451800d8c63SKirill A. Shutemov 	return page;
1452800d8c63SKirill A. Shutemov }
145318a2f371SMel Gorman 
1454800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1455800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1456800d8c63SKirill A. Shutemov {
1457800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
14587b8d046fSMatthew Wilcox 	struct address_space *mapping = info->vfs_inode.i_mapping;
14597b8d046fSMatthew Wilcox 	pgoff_t hindex;
1460800d8c63SKirill A. Shutemov 	struct page *page;
1461800d8c63SKirill A. Shutemov 
1462e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1463800d8c63SKirill A. Shutemov 		return NULL;
1464800d8c63SKirill A. Shutemov 
14654620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
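	/* Give up if any slot in the aligned huge range is already occupied */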
14667b8d046fSMatthew Wilcox 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
14677b8d046fSMatthew Wilcox 								XA_PRESENT))
1468800d8c63SKirill A. Shutemov 		return NULL;
1469800d8c63SKirill A. Shutemov 
1470800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1471800d8c63SKirill A. Shutemov 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1472356ff8a9SDavid Rientjes 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1473800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1474800d8c63SKirill A. Shutemov 	if (page)
1475800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
147618a2f371SMel Gorman 	return page;
147718a2f371SMel Gorman }
147818a2f371SMel Gorman 
147918a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
148018a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
148118a2f371SMel Gorman {
148218a2f371SMel Gorman 	struct vm_area_struct pvma;
148318a2f371SMel Gorman 	struct page *page;
148418a2f371SMel Gorman 
1485800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1486800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1487800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
148818a2f371SMel Gorman 
1489800d8c63SKirill A. Shutemov 	return page;
1490800d8c63SKirill A. Shutemov }
1491800d8c63SKirill A. Shutemov 
1492800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
14930f079694SMike Rapoport 		struct inode *inode,
1494800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1495800d8c63SKirill A. Shutemov {
14960f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1497800d8c63SKirill A. Shutemov 	struct page *page;
1498800d8c63SKirill A. Shutemov 	int nr;
1499800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1500800d8c63SKirill A. Shutemov 
1501e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1502800d8c63SKirill A. Shutemov 		huge = false;
1503800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1504800d8c63SKirill A. Shutemov 
15050f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1506800d8c63SKirill A. Shutemov 		goto failed;
1507800d8c63SKirill A. Shutemov 
1508800d8c63SKirill A. Shutemov 	if (huge)
1509800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1510800d8c63SKirill A. Shutemov 	else
1511800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
151275edd345SHugh Dickins 	if (page) {
151375edd345SHugh Dickins 		__SetPageLocked(page);
151475edd345SHugh Dickins 		__SetPageSwapBacked(page);
1515800d8c63SKirill A. Shutemov 		return page;
151675edd345SHugh Dickins 	}
151718a2f371SMel Gorman 
1518800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15190f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1520800d8c63SKirill A. Shutemov failed:
1521800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15221da177e4SLinus Torvalds }
152371fe804bSLee Schermerhorn 
15241da177e4SLinus Torvalds /*
1525bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1526bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1527bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1528bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1529bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1530bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1531bde05d1cSHugh Dickins  *
1532bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1533bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1534bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1535bde05d1cSHugh Dickins  */
1536bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1537bde05d1cSHugh Dickins {
1538bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1539bde05d1cSHugh Dickins }
1540bde05d1cSHugh Dickins 
1541bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1542bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1543bde05d1cSHugh Dickins {
1544bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1545bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1546c1cb20d4SYu Zhao 	swp_entry_t entry;
1547bde05d1cSHugh Dickins 	pgoff_t swap_index;
1548bde05d1cSHugh Dickins 	int error;
1549bde05d1cSHugh Dickins 
1550bde05d1cSHugh Dickins 	oldpage = *pagep;
1551c1cb20d4SYu Zhao 	entry.val = page_private(oldpage);
1552c1cb20d4SYu Zhao 	swap_index = swp_offset(entry);
1553bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1554bde05d1cSHugh Dickins 
1555bde05d1cSHugh Dickins 	/*
1556bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1557bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1558bde05d1cSHugh Dickins 	 */
1559bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1560bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1561bde05d1cSHugh Dickins 	if (!newpage)
1562bde05d1cSHugh Dickins 		return -ENOMEM;
1563bde05d1cSHugh Dickins 
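	/*
	 * Take an extra reference: one goes to the swap cache entry that
	 * replaces oldpage below, the other is returned to the caller in
	 * *pagep.
	 */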
156409cbfeafSKirill A. Shutemov 	get_page(newpage);
1565bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
15660142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1567bde05d1cSHugh Dickins 
15689956edf3SHugh Dickins 	__SetPageLocked(newpage);
15699956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1570bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1571c1cb20d4SYu Zhao 	set_page_private(newpage, entry.val);
1572bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1573bde05d1cSHugh Dickins 
1574bde05d1cSHugh Dickins 	/*
1575bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1576bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1577bde05d1cSHugh Dickins 	 */
1578b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
157962f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
15800142ef6cSHugh Dickins 	if (!error) {
158111fb9989SMel Gorman 		__inc_node_page_state(newpage, NR_FILE_PAGES);
158211fb9989SMel Gorman 		__dec_node_page_state(oldpage, NR_FILE_PAGES);
15830142ef6cSHugh Dickins 	}
1584b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1585bde05d1cSHugh Dickins 
15860142ef6cSHugh Dickins 	if (unlikely(error)) {
15870142ef6cSHugh Dickins 		/*
15880142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
15890142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
15900142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
15910142ef6cSHugh Dickins 		 * but be defensive: point oldpage at newpage for the clear and free below.
15920142ef6cSHugh Dickins 		oldpage = newpage;
15930142ef6cSHugh Dickins 	} else {
15946a93ca8fSJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
1595bde05d1cSHugh Dickins 		lru_cache_add_anon(newpage);
15960142ef6cSHugh Dickins 		*pagep = newpage;
15970142ef6cSHugh Dickins 	}
1598bde05d1cSHugh Dickins 
1599bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1600bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1601bde05d1cSHugh Dickins 
1602bde05d1cSHugh Dickins 	unlock_page(oldpage);
160309cbfeafSKirill A. Shutemov 	put_page(oldpage);
160409cbfeafSKirill A. Shutemov 	put_page(oldpage);
16050142ef6cSHugh Dickins 	return error;
1606bde05d1cSHugh Dickins }
1607bde05d1cSHugh Dickins 
1608bde05d1cSHugh Dickins /*
1609c5bf121eSVineeth Remanan Pillai  * Swap in the page pointed to by *pagep.
1610c5bf121eSVineeth Remanan Pillai  * Caller has to make sure that *pagep contains a valid swap entry.
1611c5bf121eSVineeth Remanan Pillai  * Returns 0 and the page in *pagep on success. On failure, returns
1612c5bf121eSVineeth Remanan Pillai  * the error code and NULL in *pagep.
16131da177e4SLinus Torvalds  */
1614c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1615c5bf121eSVineeth Remanan Pillai 			     struct page **pagep, enum sgp_type sgp,
1616c5bf121eSVineeth Remanan Pillai 			     gfp_t gfp, struct vm_area_struct *vma,
16172b740303SSouptick Joarder 			     vm_fault_t *fault_type)
16181da177e4SLinus Torvalds {
16191da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
162023f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
1621c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
162200501b53SJohannes Weiner 	struct mem_cgroup *memcg;
162327ab7006SHugh Dickins 	struct page *page;
16241da177e4SLinus Torvalds 	swp_entry_t swap;
16251da177e4SLinus Torvalds 	int error;
16261da177e4SLinus Torvalds 
1627c5bf121eSVineeth Remanan Pillai 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1628c5bf121eSVineeth Remanan Pillai 	swap = radix_to_swp_entry(*pagep);
1629c5bf121eSVineeth Remanan Pillai 	*pagep = NULL;
163054af6042SHugh Dickins 
16311da177e4SLinus Torvalds 	/* Look it up and read it in... */
1632ec560175SHuang Ying 	page = lookup_swap_cache(swap, NULL, 0);
163327ab7006SHugh Dickins 	if (!page) {
16349e18eb29SAndres Lagar-Cavilla 		/* Or update major stats only when swapin succeeds?? */
16359e18eb29SAndres Lagar-Cavilla 		if (fault_type) {
163668da9f05SHugh Dickins 			*fault_type |= VM_FAULT_MAJOR;
16379e18eb29SAndres Lagar-Cavilla 			count_vm_event(PGMAJFAULT);
16382262185cSRoman Gushchin 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
16399e18eb29SAndres Lagar-Cavilla 		}
16409e18eb29SAndres Lagar-Cavilla 		/* Here we actually start the I/O */
164141ffe5d5SHugh Dickins 		page = shmem_swapin(swap, gfp, info, index);
164227ab7006SHugh Dickins 		if (!page) {
16431da177e4SLinus Torvalds 			error = -ENOMEM;
164454af6042SHugh Dickins 			goto failed;
1645285b2c4fSHugh Dickins 		}
16461da177e4SLinus Torvalds 	}
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	/* We have to do this with page locked to prevent races */
164954af6042SHugh Dickins 	lock_page(page);
16500142ef6cSHugh Dickins 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1651d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap)) {
1652c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1653d1899228SHugh Dickins 		goto unlock;
1654bde05d1cSHugh Dickins 	}
165527ab7006SHugh Dickins 	if (!PageUptodate(page)) {
16561da177e4SLinus Torvalds 		error = -EIO;
165754af6042SHugh Dickins 		goto failed;
165854af6042SHugh Dickins 	}
165954af6042SHugh Dickins 	wait_on_page_writeback(page);
166054af6042SHugh Dickins 
1661bde05d1cSHugh Dickins 	if (shmem_should_replace_page(page, gfp)) {
1662bde05d1cSHugh Dickins 		error = shmem_replace_page(&page, gfp, info, index);
1663bde05d1cSHugh Dickins 		if (error)
166454af6042SHugh Dickins 			goto failed;
16651da177e4SLinus Torvalds 	}
16661da177e4SLinus Torvalds 
16672cf85583STejun Heo 	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1668f627c2f5SKirill A. Shutemov 					    false);
1669d1899228SHugh Dickins 	if (!error) {
167054af6042SHugh Dickins 		error = shmem_add_to_page_cache(page, mapping, index,
1671552446a4SMatthew Wilcox 						swp_to_radix_entry(swap), gfp);
1672215c02bcSHugh Dickins 		/*
1673215c02bcSHugh Dickins 		 * We already confirmed swap under page lock, and make
1674215c02bcSHugh Dickins 		 * no memory allocation here, so usually no possibility
1675215c02bcSHugh Dickins 		 * of error; but free_swap_and_cache() only trylocks a
1676215c02bcSHugh Dickins 		 * page, so it is just possible that the entry has been
1677215c02bcSHugh Dickins 		 * truncated or holepunched since swap was confirmed.
1678215c02bcSHugh Dickins 		 * shmem_undo_range() will have done some of the
1679215c02bcSHugh Dickins 		 * unaccounting, now delete_from_swap_cache() will do
168093aa7d95SVladimir Davydov 		 * the rest.
1681215c02bcSHugh Dickins 		 */
168200501b53SJohannes Weiner 		if (error) {
1683f627c2f5SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg, false);
1684215c02bcSHugh Dickins 			delete_from_swap_cache(page);
1685d1899228SHugh Dickins 		}
168600501b53SJohannes Weiner 	}
168754af6042SHugh Dickins 	if (error)
168854af6042SHugh Dickins 		goto failed;
168954af6042SHugh Dickins 
1690f627c2f5SKirill A. Shutemov 	mem_cgroup_commit_charge(page, memcg, true, false);
169100501b53SJohannes Weiner 
16924595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
169354af6042SHugh Dickins 	info->swapped--;
169454af6042SHugh Dickins 	shmem_recalc_inode(inode);
16954595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
169627ab7006SHugh Dickins 
169766d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
169866d2f4d2SHugh Dickins 		mark_page_accessed(page);
169966d2f4d2SHugh Dickins 
170027ab7006SHugh Dickins 	delete_from_swap_cache(page);
170127ab7006SHugh Dickins 	set_page_dirty(page);
170227ab7006SHugh Dickins 	swap_free(swap);
170327ab7006SHugh Dickins 
1704c5bf121eSVineeth Remanan Pillai 	*pagep = page;
1705c5bf121eSVineeth Remanan Pillai 	return 0;
1706c5bf121eSVineeth Remanan Pillai failed:
1707c5bf121eSVineeth Remanan Pillai 	if (!shmem_confirm_swap(mapping, index, swap))
1708c5bf121eSVineeth Remanan Pillai 		error = -EEXIST;
1709c5bf121eSVineeth Remanan Pillai unlock:
1710c5bf121eSVineeth Remanan Pillai 	if (page) {
1711c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1712c5bf121eSVineeth Remanan Pillai 		put_page(page);
1713c5bf121eSVineeth Remanan Pillai 	}
1714c5bf121eSVineeth Remanan Pillai 
1715c5bf121eSVineeth Remanan Pillai 	return error;
1716c5bf121eSVineeth Remanan Pillai }
1717c5bf121eSVineeth Remanan Pillai 
1718c5bf121eSVineeth Remanan Pillai /*
1719c5bf121eSVineeth Remanan Pillai  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1720c5bf121eSVineeth Remanan Pillai  *
1721c5bf121eSVineeth Remanan Pillai  * If we allocate a new one we do not mark it dirty. That's up to the
1722c5bf121eSVineeth Remanan Pillai  * vm. If we swap it in we mark it dirty, since we also free the swap
1723c5bf121eSVineeth Remanan Pillai  * entry: a page cannot live in both the swap and page cache.
1724c5bf121eSVineeth Remanan Pillai  *
1725c5bf121eSVineeth Remanan Pillai  * fault_mm and fault_type are only supplied by shmem_fault:
1726c5bf121eSVineeth Remanan Pillai  * otherwise they are NULL.
1727c5bf121eSVineeth Remanan Pillai  */
1728c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1729c5bf121eSVineeth Remanan Pillai 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1730c5bf121eSVineeth Remanan Pillai 	struct vm_area_struct *vma, struct vm_fault *vmf,
1731c5bf121eSVineeth Remanan Pillai 			vm_fault_t *fault_type)
1732c5bf121eSVineeth Remanan Pillai {
1733c5bf121eSVineeth Remanan Pillai 	struct address_space *mapping = inode->i_mapping;
1734c5bf121eSVineeth Remanan Pillai 	struct shmem_inode_info *info = SHMEM_I(inode);
1735c5bf121eSVineeth Remanan Pillai 	struct shmem_sb_info *sbinfo;
1736c5bf121eSVineeth Remanan Pillai 	struct mm_struct *charge_mm;
1737c5bf121eSVineeth Remanan Pillai 	struct mem_cgroup *memcg;
1738c5bf121eSVineeth Remanan Pillai 	struct page *page;
1739c5bf121eSVineeth Remanan Pillai 	enum sgp_type sgp_huge = sgp;
1740c5bf121eSVineeth Remanan Pillai 	pgoff_t hindex = index;
1741c5bf121eSVineeth Remanan Pillai 	int error;
1742c5bf121eSVineeth Remanan Pillai 	int once = 0;
1743c5bf121eSVineeth Remanan Pillai 	int alloced = 0;
1744c5bf121eSVineeth Remanan Pillai 
1745c5bf121eSVineeth Remanan Pillai 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1746c5bf121eSVineeth Remanan Pillai 		return -EFBIG;
1747c5bf121eSVineeth Remanan Pillai 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1748c5bf121eSVineeth Remanan Pillai 		sgp = SGP_CACHE;
1749c5bf121eSVineeth Remanan Pillai repeat:
1750c5bf121eSVineeth Remanan Pillai 	if (sgp <= SGP_CACHE &&
1751c5bf121eSVineeth Remanan Pillai 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1752c5bf121eSVineeth Remanan Pillai 		return -EINVAL;
1753c5bf121eSVineeth Remanan Pillai 	}
1754c5bf121eSVineeth Remanan Pillai 
1755c5bf121eSVineeth Remanan Pillai 	sbinfo = SHMEM_SB(inode->i_sb);
1756c5bf121eSVineeth Remanan Pillai 	charge_mm = vma ? vma->vm_mm : current->mm;
1757c5bf121eSVineeth Remanan Pillai 
1758c5bf121eSVineeth Remanan Pillai 	page = find_lock_entry(mapping, index);
1759c5bf121eSVineeth Remanan Pillai 	if (xa_is_value(page)) {
1760c5bf121eSVineeth Remanan Pillai 		error = shmem_swapin_page(inode, index, &page,
1761c5bf121eSVineeth Remanan Pillai 					  sgp, gfp, vma, fault_type);
1762c5bf121eSVineeth Remanan Pillai 		if (error == -EEXIST)
1763c5bf121eSVineeth Remanan Pillai 			goto repeat;
1764c5bf121eSVineeth Remanan Pillai 
1765c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1766c5bf121eSVineeth Remanan Pillai 		return error;
1767c5bf121eSVineeth Remanan Pillai 	}
1768c5bf121eSVineeth Remanan Pillai 
1769c5bf121eSVineeth Remanan Pillai 	if (page && sgp == SGP_WRITE)
1770c5bf121eSVineeth Remanan Pillai 		mark_page_accessed(page);
1771c5bf121eSVineeth Remanan Pillai 
1772c5bf121eSVineeth Remanan Pillai 	/* fallocated page? */
1773c5bf121eSVineeth Remanan Pillai 	if (page && !PageUptodate(page)) {
1774c5bf121eSVineeth Remanan Pillai 		if (sgp != SGP_READ)
1775c5bf121eSVineeth Remanan Pillai 			goto clear;
1776c5bf121eSVineeth Remanan Pillai 		unlock_page(page);
1777c5bf121eSVineeth Remanan Pillai 		put_page(page);
1778c5bf121eSVineeth Remanan Pillai 		page = NULL;
1779c5bf121eSVineeth Remanan Pillai 	}
1780c5bf121eSVineeth Remanan Pillai 	if (page || sgp == SGP_READ) {
1781c5bf121eSVineeth Remanan Pillai 		*pagep = page;
1782c5bf121eSVineeth Remanan Pillai 		return 0;
1783c5bf121eSVineeth Remanan Pillai 	}
1784c5bf121eSVineeth Remanan Pillai 
1785c5bf121eSVineeth Remanan Pillai 	/*
1786c5bf121eSVineeth Remanan Pillai 	 * Fast cache lookup did not find it:
1787c5bf121eSVineeth Remanan Pillai 	 * bring it back from swap or allocate.
1788c5bf121eSVineeth Remanan Pillai 	 */
1789c5bf121eSVineeth Remanan Pillai 
1790cfda0526SMike Rapoport 	if (vma && userfaultfd_missing(vma)) {
1791cfda0526SMike Rapoport 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1792cfda0526SMike Rapoport 		return 0;
1793cfda0526SMike Rapoport 	}
1794cfda0526SMike Rapoport 
1795800d8c63SKirill A. Shutemov 	/* shmem_symlink() */
1796800d8c63SKirill A. Shutemov 	if (mapping->a_ops != &shmem_aops)
1797800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1798657e3038SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1799800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1800800d8c63SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
1801800d8c63SKirill A. Shutemov 		goto alloc_huge;
1802800d8c63SKirill A. Shutemov 	switch (sbinfo->huge) {
1803800d8c63SKirill A. Shutemov 		loff_t i_size;
1804800d8c63SKirill A. Shutemov 		pgoff_t off;
1805800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
1806800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1807800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_WITHIN_SIZE:
1808800d8c63SKirill A. Shutemov 		off = round_up(index, HPAGE_PMD_NR);
1809800d8c63SKirill A. Shutemov 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1810800d8c63SKirill A. Shutemov 		if (i_size >= HPAGE_PMD_SIZE &&
1811800d8c63SKirill A. Shutemov 		    i_size >> PAGE_SHIFT >= off)
1812800d8c63SKirill A. Shutemov 			goto alloc_huge;
1813800d8c63SKirill A. Shutemov 		/* fallthrough */
1814800d8c63SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
1815657e3038SKirill A. Shutemov 		if (sgp_huge == SGP_HUGE)
1816657e3038SKirill A. Shutemov 			goto alloc_huge;
1817657e3038SKirill A. Shutemov 		/* TODO: implement fadvise() hints */
1818800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
181959a16eadSHugh Dickins 	}
18201da177e4SLinus Torvalds 
1821800d8c63SKirill A. Shutemov alloc_huge:
18220f079694SMike Rapoport 	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1823800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1824c5bf121eSVineeth Remanan Pillai alloc_nohuge:
1825c5bf121eSVineeth Remanan Pillai 		page = shmem_alloc_and_acct_page(gfp, inode,
1826800d8c63SKirill A. Shutemov 						 index, false);
182754af6042SHugh Dickins 	}
1828800d8c63SKirill A. Shutemov 	if (IS_ERR(page)) {
1829779750d2SKirill A. Shutemov 		int retry = 5;
1830c5bf121eSVineeth Remanan Pillai 
1831800d8c63SKirill A. Shutemov 		error = PTR_ERR(page);
1832800d8c63SKirill A. Shutemov 		page = NULL;
1833779750d2SKirill A. Shutemov 		if (error != -ENOSPC)
1834c5bf121eSVineeth Remanan Pillai 			goto unlock;
1835779750d2SKirill A. Shutemov 		/*
1836c5bf121eSVineeth Remanan Pillai 		 * Try to reclaim some space by splitting a huge page
1837779750d2SKirill A. Shutemov 		 * beyond i_size on the filesystem.
1838779750d2SKirill A. Shutemov 		 */
1839779750d2SKirill A. Shutemov 		while (retry--) {
1840779750d2SKirill A. Shutemov 			int ret;
1841c5bf121eSVineeth Remanan Pillai 
1842779750d2SKirill A. Shutemov 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1843779750d2SKirill A. Shutemov 			if (ret == SHRINK_STOP)
1844779750d2SKirill A. Shutemov 				break;
1845779750d2SKirill A. Shutemov 			if (ret)
1846779750d2SKirill A. Shutemov 				goto alloc_nohuge;
1847779750d2SKirill A. Shutemov 		}
1848c5bf121eSVineeth Remanan Pillai 		goto unlock;
1849800d8c63SKirill A. Shutemov 	}
1850800d8c63SKirill A. Shutemov 
1851800d8c63SKirill A. Shutemov 	if (PageTransHuge(page))
1852800d8c63SKirill A. Shutemov 		hindex = round_down(index, HPAGE_PMD_NR);
1853800d8c63SKirill A. Shutemov 	else
1854800d8c63SKirill A. Shutemov 		hindex = index;
1855800d8c63SKirill A. Shutemov 
185666d2f4d2SHugh Dickins 	if (sgp == SGP_WRITE)
1857eb39d618SHugh Dickins 		__SetPageReferenced(page);
185866d2f4d2SHugh Dickins 
18592cf85583STejun Heo 	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1860800d8c63SKirill A. Shutemov 					    PageTransHuge(page));
186154af6042SHugh Dickins 	if (error)
1862800d8c63SKirill A. Shutemov 		goto unacct;
1863800d8c63SKirill A. Shutemov 	error = shmem_add_to_page_cache(page, mapping, hindex,
1864552446a4SMatthew Wilcox 					NULL, gfp & GFP_RECLAIM_MASK);
1865b065b432SHugh Dickins 	if (error) {
1866800d8c63SKirill A. Shutemov 		mem_cgroup_cancel_charge(page, memcg,
1867800d8c63SKirill A. Shutemov 					 PageTransHuge(page));
1868800d8c63SKirill A. Shutemov 		goto unacct;
1869b065b432SHugh Dickins 	}
1870800d8c63SKirill A. Shutemov 	mem_cgroup_commit_charge(page, memcg, false,
1871800d8c63SKirill A. Shutemov 				 PageTransHuge(page));
187254af6042SHugh Dickins 	lru_cache_add_anon(page);
187354af6042SHugh Dickins 
18744595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
1875800d8c63SKirill A. Shutemov 	info->alloced += 1 << compound_order(page);
1876800d8c63SKirill A. Shutemov 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
187754af6042SHugh Dickins 	shmem_recalc_inode(inode);
18784595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
18791635f6a7SHugh Dickins 	alloced = true;
188054af6042SHugh Dickins 
1881779750d2SKirill A. Shutemov 	if (PageTransHuge(page) &&
1882779750d2SKirill A. Shutemov 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1883779750d2SKirill A. Shutemov 			hindex + HPAGE_PMD_NR - 1) {
1884779750d2SKirill A. Shutemov 		/*
1885779750d2SKirill A. Shutemov 		 * Part of the huge page is beyond i_size: subject
1886779750d2SKirill A. Shutemov 		 * to shrink under memory pressure.
1887779750d2SKirill A. Shutemov 		 */
1888779750d2SKirill A. Shutemov 		spin_lock(&sbinfo->shrinklist_lock);
1889d041353dSCong Wang 		/*
1890d041353dSCong Wang 		 * list_empty_careful() defends against unlocked access
1891d041353dSCong Wang 		 * to ->shrink_list in shmem_unused_huge_shrink()
1892d041353dSCong Wang 		 */
1893d041353dSCong Wang 		if (list_empty_careful(&info->shrinklist)) {
1894779750d2SKirill A. Shutemov 			list_add_tail(&info->shrinklist,
1895779750d2SKirill A. Shutemov 				      &sbinfo->shrinklist);
1896779750d2SKirill A. Shutemov 			sbinfo->shrinklist_len++;
1897779750d2SKirill A. Shutemov 		}
1898779750d2SKirill A. Shutemov 		spin_unlock(&sbinfo->shrinklist_lock);
1899779750d2SKirill A. Shutemov 	}
1900779750d2SKirill A. Shutemov 
1901ec9516fbSHugh Dickins 	/*
19021635f6a7SHugh Dickins 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
19031635f6a7SHugh Dickins 	 */
19041635f6a7SHugh Dickins 	if (sgp == SGP_FALLOC)
19051635f6a7SHugh Dickins 		sgp = SGP_WRITE;
19061635f6a7SHugh Dickins clear:
19071635f6a7SHugh Dickins 	/*
19081635f6a7SHugh Dickins 	 * Let SGP_WRITE caller clear ends if write does not fill page;
19091635f6a7SHugh Dickins 	 * but SGP_FALLOC on a page fallocated earlier must initialize
19101635f6a7SHugh Dickins 	 * it now, lest undo on failure cancel our earlier guarantee.
1911ec9516fbSHugh Dickins 	 */
1912800d8c63SKirill A. Shutemov 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1913800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
1914800d8c63SKirill A. Shutemov 		int i;
1915800d8c63SKirill A. Shutemov 
1916800d8c63SKirill A. Shutemov 		for (i = 0; i < (1 << compound_order(head)); i++) {
1917800d8c63SKirill A. Shutemov 			clear_highpage(head + i);
1918800d8c63SKirill A. Shutemov 			flush_dcache_page(head + i);
1919800d8c63SKirill A. Shutemov 		}
1920800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
1921ec9516fbSHugh Dickins 	}
1922bde05d1cSHugh Dickins 
192354af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
192475edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
192509cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1926267a4c76SHugh Dickins 		if (alloced) {
1927267a4c76SHugh Dickins 			ClearPageDirty(page);
1928267a4c76SHugh Dickins 			delete_from_page_cache(page);
19294595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1930267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
19314595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1932267a4c76SHugh Dickins 		}
193354af6042SHugh Dickins 		error = -EINVAL;
1934267a4c76SHugh Dickins 		goto unlock;
1935ff36b801SShaohua Li 	}
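	/* For a huge page, hand back the subpage for the requested index */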
1936800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
193754af6042SHugh Dickins 	return 0;
1938d00806b1SNick Piggin 
1939d0217ac0SNick Piggin 	/*
194054af6042SHugh Dickins 	 * Error recovery.
19411da177e4SLinus Torvalds 	 */
194254af6042SHugh Dickins unacct:
19430f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1944800d8c63SKirill A. Shutemov 
1945800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
1946800d8c63SKirill A. Shutemov 		unlock_page(page);
1947800d8c63SKirill A. Shutemov 		put_page(page);
1948800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1949800d8c63SKirill A. Shutemov 	}
1950d1899228SHugh Dickins unlock:
195127ab7006SHugh Dickins 	if (page) {
195254af6042SHugh Dickins 		unlock_page(page);
195309cbfeafSKirill A. Shutemov 		put_page(page);
195454af6042SHugh Dickins 	}
195554af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
19564595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
195754af6042SHugh Dickins 		shmem_recalc_inode(inode);
19584595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
19591da177e4SLinus Torvalds 		goto repeat;
1960d8dc74f2SAdrian Bunk 	}
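	/* Lost a race with another thread's cache insertion or swapin */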
19617f4446eeSMatthew Wilcox 	if (error == -EEXIST)
196254af6042SHugh Dickins 		goto repeat;
196354af6042SHugh Dickins 	return error;
19641da177e4SLinus Torvalds }
19651da177e4SLinus Torvalds 
196610d20bd2SLinus Torvalds /*
196710d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
196810d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
196910d20bd2SLinus Torvalds  * target.
197010d20bd2SLinus Torvalds  */
1971ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
197210d20bd2SLinus Torvalds {
197310d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
19742055da97SIngo Molnar 	list_del_init(&wait->entry);
197510d20bd2SLinus Torvalds 	return ret;
197610d20bd2SLinus Torvalds }
197710d20bd2SLinus Torvalds 
197820acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
19791da177e4SLinus Torvalds {
198011bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1981496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
19829e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1983657e3038SKirill A. Shutemov 	enum sgp_type sgp;
198420acce67SSouptick Joarder 	int err;
198520acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
19861da177e4SLinus Torvalds 
1987f00cdc6dSHugh Dickins 	/*
1988f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
1989f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
1990f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
19918e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
19928e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
19938e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
19948e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
19958e205f77SHugh Dickins 	 *
19968e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
19978e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
19988e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
19998e205f77SHugh Dickins 	 *
20008e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
20018e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
20028e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
2003f00cdc6dSHugh Dickins 	 */
2004f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
2005f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
2006f00cdc6dSHugh Dickins 
2007f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2008f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
20098e205f77SHugh Dickins 		if (shmem_falloc &&
20108e205f77SHugh Dickins 		    shmem_falloc->waitq &&
20118e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
20128e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
20138e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
201410d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
20158e205f77SHugh Dickins 
20168e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
2017f00cdc6dSHugh Dickins 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
2018f00cdc6dSHugh Dickins 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
20198e205f77SHugh Dickins 				/* It's polite to up mmap_sem if we can */
2020f00cdc6dSHugh Dickins 				up_read(&vma->vm_mm->mmap_sem);
20218e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
2022f00cdc6dSHugh Dickins 			}
20238e205f77SHugh Dickins 
20248e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
20258e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
20268e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
20278e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20288e205f77SHugh Dickins 			schedule();
20298e205f77SHugh Dickins 
20308e205f77SHugh Dickins 			/*
20318e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
20328e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
20338e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
20348e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
20358e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
20368e205f77SHugh Dickins 			 */
20378e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
20388e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
20398e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20408e205f77SHugh Dickins 			return ret;
2041f00cdc6dSHugh Dickins 		}
20428e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2043f00cdc6dSHugh Dickins 	}
2044f00cdc6dSHugh Dickins 
2045657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
204618600332SMichal Hocko 
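	/*
	 * Honour per-vma and per-process hints when choosing the huge
	 * page policy for this fault.
	 */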
204718600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
204818600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2049657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
205018600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
205118600332SMichal Hocko 		sgp = SGP_HUGE;
2052657e3038SKirill A. Shutemov 
205320acce67SSouptick Joarder 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2054cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
205520acce67SSouptick Joarder 	if (err)
205620acce67SSouptick Joarder 		return vmf_error(err);
205768da9f05SHugh Dickins 	return ret;
20581da177e4SLinus Torvalds }
20591da177e4SLinus Torvalds 
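/*
 * Like the task's usual get_unmapped_area, but when transparent huge
 * pages may be used, try to return an address at which a PMD-aligned,
 * PMD-sized mapping of the file is possible.
 */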
2060c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2061c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2062c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2063c01d5b30SHugh Dickins {
2064c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2065c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2066c01d5b30SHugh Dickins 	unsigned long addr;
2067c01d5b30SHugh Dickins 	unsigned long offset;
2068c01d5b30SHugh Dickins 	unsigned long inflated_len;
2069c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2070c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2071c01d5b30SHugh Dickins 
2072c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2073c01d5b30SHugh Dickins 		return -ENOMEM;
2074c01d5b30SHugh Dickins 
2075c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2076c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2077c01d5b30SHugh Dickins 
2078e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2079c01d5b30SHugh Dickins 		return addr;
2080c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2081c01d5b30SHugh Dickins 		return addr;
2082c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2083c01d5b30SHugh Dickins 		return addr;
2084c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2085c01d5b30SHugh Dickins 		return addr;
2086c01d5b30SHugh Dickins 
2087c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2088c01d5b30SHugh Dickins 		return addr;
2089c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2090c01d5b30SHugh Dickins 		return addr;
2091c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2092c01d5b30SHugh Dickins 		return addr;
2093c01d5b30SHugh Dickins 	/*
2094c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2095c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2096c01d5b30SHugh Dickins 	 * But if caller specified an address hint, respect that as before.
2097c01d5b30SHugh Dickins 	 */
2098c01d5b30SHugh Dickins 	if (uaddr)
2099c01d5b30SHugh Dickins 		return addr;
2100c01d5b30SHugh Dickins 
2101c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2102c01d5b30SHugh Dickins 		struct super_block *sb;
2103c01d5b30SHugh Dickins 
2104c01d5b30SHugh Dickins 		if (file) {
2105c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2106c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2107c01d5b30SHugh Dickins 		} else {
2108c01d5b30SHugh Dickins 			/*
2109c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2110c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2111c01d5b30SHugh Dickins 			 */
2112c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2113c01d5b30SHugh Dickins 				return addr;
2114c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2115c01d5b30SHugh Dickins 		}
21163089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2117c01d5b30SHugh Dickins 			return addr;
2118c01d5b30SHugh Dickins 	}
2119c01d5b30SHugh Dickins 
2120c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2121c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2122c01d5b30SHugh Dickins 		return addr;
2123c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2124c01d5b30SHugh Dickins 		return addr;
2125c01d5b30SHugh Dickins 
2126c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2127c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2128c01d5b30SHugh Dickins 		return addr;
2129c01d5b30SHugh Dickins 	if (inflated_len < len)
2130c01d5b30SHugh Dickins 		return addr;
2131c01d5b30SHugh Dickins 
2132c01d5b30SHugh Dickins 	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2133c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2134c01d5b30SHugh Dickins 		return addr;
2135c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2136c01d5b30SHugh Dickins 		return addr;
2137c01d5b30SHugh Dickins 
2138c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2139c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2140c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2141c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2142c01d5b30SHugh Dickins 
2143c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2144c01d5b30SHugh Dickins 		return addr;
2145c01d5b30SHugh Dickins 	return inflated_addr;
2146c01d5b30SHugh Dickins }
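
/*
 * Illustrative userspace sketch (not part of shmem.c): with tmpfs huge pages
 * enabled (e.g. mount option huge=always, or shmem_enabled=force), a large
 * MAP_SHARED mapping with no address hint should come back PMD-aligned,
 * thanks to the inflate-and-round logic above.  Assumptions: x86_64's 2MB
 * HPAGE_PMD_SIZE, and a glibc new enough to expose memfd_create().
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const size_t pmd = 2UL << 20;           /* HPAGE_PMD_SIZE on x86_64 */
        const size_t len = 8 * pmd;
        int fd = memfd_create("thp-align", 0);  /* memfd is shmem-backed */

        if (fd < 0 || ftruncate(fd, len) < 0)
                return 1;
        /* NULL hint: lets shmem_get_unmapped_area() pick the address */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        printf("addr %p, PMD-aligned: %s\n", p,
               ((unsigned long)p % pmd) ? "no" : "yes");
        return 0;
}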
2147c01d5b30SHugh Dickins 
21481da177e4SLinus Torvalds #ifdef CONFIG_NUMA
214941ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
21501da177e4SLinus Torvalds {
2151496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
215241ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
21531da177e4SLinus Torvalds }
21541da177e4SLinus Torvalds 
2155d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2156d8dc74f2SAdrian Bunk 					  unsigned long addr)
21571da177e4SLinus Torvalds {
2158496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
215941ffe5d5SHugh Dickins 	pgoff_t index;
21601da177e4SLinus Torvalds 
216141ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
216241ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
21631da177e4SLinus Torvalds }
21641da177e4SLinus Torvalds #endif
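
/*
 * Illustrative sketch (assumptions: a NUMA kernel and <numaif.h> from the
 * libnuma headers): mbind(2) on a MAP_SHARED shmem mapping lands in
 * shmem_set_policy() above, and later faults consult shmem_get_policy()
 * when choosing which node to allocate from.
 */
#include <numaif.h>
#include <stddef.h>

static int bind_to_node0(void *addr, size_t len)
{
        unsigned long nodemask = 1UL << 0;      /* node 0 only */

        /* addr/len must be page-aligned and lie within the shmem mapping */
        return mbind(addr, len, MPOL_BIND, &nodemask,
                     sizeof(nodemask) * 8, 0);
}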
21651da177e4SLinus Torvalds 
21661da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
21671da177e4SLinus Torvalds {
2168496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
21691da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
21701da177e4SLinus Torvalds 	int retval = -ENOMEM;
21711da177e4SLinus Torvalds 
21724595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
21731da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
21741da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
21751da177e4SLinus Torvalds 			goto out_nomem;
21761da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
217789e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
21781da177e4SLinus Torvalds 	}
21791da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
21801da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
21811da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
218289e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
21831da177e4SLinus Torvalds 	}
21841da177e4SLinus Torvalds 	retval = 0;
218589e004eaSLee Schermerhorn 
21861da177e4SLinus Torvalds out_nomem:
21874595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
21881da177e4SLinus Torvalds 	return retval;
21891da177e4SLinus Torvalds }
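
/*
 * Illustrative sketch: shmem_lock() is what shmctl(SHM_LOCK) lands on for
 * SysV shared memory -- it marks the segment's pages unevictable rather
 * than mlock()ing any particular mapping.  Assumes RLIMIT_MEMLOCK headroom
 * (or CAP_IPC_LOCK), which user_shm_lock() checks above.
 */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stddef.h>

static int make_segment_unswappable(size_t size)
{
        int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);

        if (id < 0)
                return -1;
        /* the third argument is unused for SHM_LOCK */
        return shmctl(id, SHM_LOCK, NULL);
}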
21901da177e4SLinus Torvalds 
21919b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
21921da177e4SLinus Torvalds {
21931da177e4SLinus Torvalds 	file_accessed(file);
21941da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2195e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2196f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2197f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2198f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2199f3f0e1d2SKirill A. Shutemov 	}
22001da177e4SLinus Torvalds 	return 0;
22011da177e4SLinus Torvalds }
22021da177e4SLinus Torvalds 
2203454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
220409208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
22051da177e4SLinus Torvalds {
22061da177e4SLinus Torvalds 	struct inode *inode;
22071da177e4SLinus Torvalds 	struct shmem_inode_info *info;
22081da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
22091da177e4SLinus Torvalds 
22105b04c689SPavel Emelyanov 	if (shmem_reserve_inode(sb))
22111da177e4SLinus Torvalds 		return NULL;
22121da177e4SLinus Torvalds 
22131da177e4SLinus Torvalds 	inode = new_inode(sb);
22141da177e4SLinus Torvalds 	if (inode) {
221585fe4025SChristoph Hellwig 		inode->i_ino = get_next_ino();
2216454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
22171da177e4SLinus Torvalds 		inode->i_blocks = 0;
2218078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
221946c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
22201da177e4SLinus Torvalds 		info = SHMEM_I(inode);
22211da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
22221da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
222340e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
22240b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2225779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
22261da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
222738f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
222872c04902SAl Viro 		cache_no_acl(inode);
22291da177e4SLinus Torvalds 
22301da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
22311da177e4SLinus Torvalds 		default:
223239f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
22331da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
22341da177e4SLinus Torvalds 			break;
22351da177e4SLinus Torvalds 		case S_IFREG:
223614fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
22371da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
22381da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
223971fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
224071fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
22411da177e4SLinus Torvalds 			break;
22421da177e4SLinus Torvalds 		case S_IFDIR:
2243d8c76e6fSDave Hansen 			inc_nlink(inode);
22441da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
22451da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
22461da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
22471da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
22481da177e4SLinus Torvalds 			break;
22491da177e4SLinus Torvalds 		case S_IFLNK:
22501da177e4SLinus Torvalds 			/*
22511da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
22521da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
22531da177e4SLinus Torvalds 			 */
225471fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
22551da177e4SLinus Torvalds 			break;
22561da177e4SLinus Torvalds 		}
2257b45d71fbSJoel Fernandes (Google) 
2258b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
22595b04c689SPavel Emelyanov 	} else
22605b04c689SPavel Emelyanov 		shmem_free_inode(sb);
22611da177e4SLinus Torvalds 	return inode;
22621da177e4SLinus Torvalds }
22631da177e4SLinus Torvalds 
22640cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping)
22650cd6144aSJohannes Weiner {
2266f8005451SHugh Dickins 	return mapping->a_ops == &shmem_aops;
22670cd6144aSJohannes Weiner }
22680cd6144aSJohannes Weiner 
22698d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
22704c27fe4cSMike Rapoport 				  pmd_t *dst_pmd,
22714c27fe4cSMike Rapoport 				  struct vm_area_struct *dst_vma,
22724c27fe4cSMike Rapoport 				  unsigned long dst_addr,
22734c27fe4cSMike Rapoport 				  unsigned long src_addr,
22748d103963SMike Rapoport 				  bool zeropage,
22754c27fe4cSMike Rapoport 				  struct page **pagep)
22764c27fe4cSMike Rapoport {
22774c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
22784c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
22794c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
22804c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
22814c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
22824c27fe4cSMike Rapoport 	struct mem_cgroup *memcg;
22834c27fe4cSMike Rapoport 	spinlock_t *ptl;
22844c27fe4cSMike Rapoport 	void *page_kaddr;
22854c27fe4cSMike Rapoport 	struct page *page;
22864c27fe4cSMike Rapoport 	pte_t _dst_pte, *dst_pte;
22874c27fe4cSMike Rapoport 	int ret;
2288e2a50c1fSAndrea Arcangeli 	pgoff_t offset, max_off;
22894c27fe4cSMike Rapoport 
22904c27fe4cSMike Rapoport 	ret = -ENOMEM;
22910f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, 1))
22924c27fe4cSMike Rapoport 		goto out;
22934c27fe4cSMike Rapoport 
2294cb658a45SAndrea Arcangeli 	if (!*pagep) {
22954c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
22964c27fe4cSMike Rapoport 		if (!page)
22970f079694SMike Rapoport 			goto out_unacct_blocks;
22984c27fe4cSMike Rapoport 
22998d103963SMike Rapoport 		if (!zeropage) {	/* mcopy_atomic */
23004c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
23018d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
23028d103963SMike Rapoport 					     (const void __user *)src_addr,
23034c27fe4cSMike Rapoport 					     PAGE_SIZE);
23044c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
23054c27fe4cSMike Rapoport 
23074c27fe4cSMike Rapoport 			/* fall back to copy_from_user outside mmap_sem */
23074c27fe4cSMike Rapoport 			if (unlikely(ret)) {
23084c27fe4cSMike Rapoport 				*pagep = page;
23090f079694SMike Rapoport 				shmem_inode_unacct_blocks(inode, 1);
23104c27fe4cSMike Rapoport 				/* don't free the page */
23119e368259SAndrea Arcangeli 				return -ENOENT;
23124c27fe4cSMike Rapoport 			}
23138d103963SMike Rapoport 		} else {		/* mfill_zeropage_atomic */
23148d103963SMike Rapoport 			clear_highpage(page);
23158d103963SMike Rapoport 		}
23164c27fe4cSMike Rapoport 	} else {
23174c27fe4cSMike Rapoport 		page = *pagep;
23184c27fe4cSMike Rapoport 		*pagep = NULL;
23194c27fe4cSMike Rapoport 	}
23204c27fe4cSMike Rapoport 
23219cc90c66SAndrea Arcangeli 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
23229cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
23239cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2324a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
23259cc90c66SAndrea Arcangeli 
2326e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2327e2a50c1fSAndrea Arcangeli 	offset = linear_page_index(dst_vma, dst_addr);
2328e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2329e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
2330e2a50c1fSAndrea Arcangeli 		goto out_release;
2331e2a50c1fSAndrea Arcangeli 
23322cf85583STejun Heo 	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
23334c27fe4cSMike Rapoport 	if (ret)
23344c27fe4cSMike Rapoport 		goto out_release;
23354c27fe4cSMike Rapoport 
2336552446a4SMatthew Wilcox 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2337552446a4SMatthew Wilcox 						gfp & GFP_RECLAIM_MASK);
23384c27fe4cSMike Rapoport 	if (ret)
23394c27fe4cSMike Rapoport 		goto out_release_uncharge;
23404c27fe4cSMike Rapoport 
23414c27fe4cSMike Rapoport 	mem_cgroup_commit_charge(page, memcg, false, false);
23424c27fe4cSMike Rapoport 
23434c27fe4cSMike Rapoport 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
23444c27fe4cSMike Rapoport 	if (dst_vma->vm_flags & VM_WRITE)
23454c27fe4cSMike Rapoport 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2346dcf7fe9dSAndrea Arcangeli 	else {
2347dcf7fe9dSAndrea Arcangeli 		/*
2348dcf7fe9dSAndrea Arcangeli 		 * We don't set the pte dirty if the vma has no
2349dcf7fe9dSAndrea Arcangeli 		 * VM_WRITE permission, so mark the page dirty or it
2350dcf7fe9dSAndrea Arcangeli 		 * could be freed from under us. We could do it
2351dcf7fe9dSAndrea Arcangeli 		 * unconditionally before unlock_page(), but doing it
2352dcf7fe9dSAndrea Arcangeli 		 * only if VM_WRITE is not set is faster.
2353dcf7fe9dSAndrea Arcangeli 		 */
2354dcf7fe9dSAndrea Arcangeli 		set_page_dirty(page);
2355dcf7fe9dSAndrea Arcangeli 	}
23564c27fe4cSMike Rapoport 
23574c27fe4cSMike Rapoport 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2358e2a50c1fSAndrea Arcangeli 
2359e2a50c1fSAndrea Arcangeli 	ret = -EFAULT;
2360e2a50c1fSAndrea Arcangeli 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2361e2a50c1fSAndrea Arcangeli 	if (unlikely(offset >= max_off))
2362e2a50c1fSAndrea Arcangeli 		goto out_release_uncharge_unlock;
2363e2a50c1fSAndrea Arcangeli 
2364e2a50c1fSAndrea Arcangeli 	ret = -EEXIST;
23654c27fe4cSMike Rapoport 	if (!pte_none(*dst_pte))
23664c27fe4cSMike Rapoport 		goto out_release_uncharge_unlock;
23674c27fe4cSMike Rapoport 
23684c27fe4cSMike Rapoport 	lru_cache_add_anon(page);
23694c27fe4cSMike Rapoport 
23704c27fe4cSMike Rapoport 	spin_lock(&info->lock);
23714c27fe4cSMike Rapoport 	info->alloced++;
23724c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
23734c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
23744c27fe4cSMike Rapoport 	spin_unlock(&info->lock);
23754c27fe4cSMike Rapoport 
23764c27fe4cSMike Rapoport 	inc_mm_counter(dst_mm, mm_counter_file(page));
23774c27fe4cSMike Rapoport 	page_add_file_rmap(page, false);
23784c27fe4cSMike Rapoport 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
23794c27fe4cSMike Rapoport 
23804c27fe4cSMike Rapoport 	/* No need to invalidate - it was non-present before */
23814c27fe4cSMike Rapoport 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
23824c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2383e2a50c1fSAndrea Arcangeli 	unlock_page(page);
23844c27fe4cSMike Rapoport 	ret = 0;
23854c27fe4cSMike Rapoport out:
23864c27fe4cSMike Rapoport 	return ret;
23874c27fe4cSMike Rapoport out_release_uncharge_unlock:
23884c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
2389dcf7fe9dSAndrea Arcangeli 	ClearPageDirty(page);
2390e2a50c1fSAndrea Arcangeli 	delete_from_page_cache(page);
23914c27fe4cSMike Rapoport out_release_uncharge:
23924c27fe4cSMike Rapoport 	mem_cgroup_cancel_charge(page, memcg, false);
23934c27fe4cSMike Rapoport out_release:
23949cc90c66SAndrea Arcangeli 	unlock_page(page);
23954c27fe4cSMike Rapoport 	put_page(page);
23964c27fe4cSMike Rapoport out_unacct_blocks:
23970f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
23984c27fe4cSMike Rapoport 	goto out;
23994c27fe4cSMike Rapoport }
24004c27fe4cSMike Rapoport 
24018d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
24028d103963SMike Rapoport 			   pmd_t *dst_pmd,
24038d103963SMike Rapoport 			   struct vm_area_struct *dst_vma,
24048d103963SMike Rapoport 			   unsigned long dst_addr,
24058d103963SMike Rapoport 			   unsigned long src_addr,
24068d103963SMike Rapoport 			   struct page **pagep)
24078d103963SMike Rapoport {
24088d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24098d103963SMike Rapoport 				      dst_addr, src_addr, false, pagep);
24108d103963SMike Rapoport }
24118d103963SMike Rapoport 
24128d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
24138d103963SMike Rapoport 			     pmd_t *dst_pmd,
24148d103963SMike Rapoport 			     struct vm_area_struct *dst_vma,
24158d103963SMike Rapoport 			     unsigned long dst_addr)
24168d103963SMike Rapoport {
24178d103963SMike Rapoport 	struct page *page = NULL;
24188d103963SMike Rapoport 
24198d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
24208d103963SMike Rapoport 				      dst_addr, 0, true, &page);
24218d103963SMike Rapoport }
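
/*
 * Illustrative sketch of the userspace side: shmem_mcopy_atomic_pte() and
 * shmem_mfill_zeropage_pte() service the UFFDIO_COPY / UFFDIO_ZEROPAGE
 * ioctls on a shmem mapping.  Assumes `uffd` came from the userfaultfd(2)
 * syscall, UFFDIO_API was negotiated, and the range was registered with
 * UFFDIO_REGISTER_MODE_MISSING.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int resolve_missing_fault(int uffd, unsigned long fault_addr,
                                 const void *src, unsigned long page_size)
{
        struct uffdio_copy copy = {
                .dst = fault_addr & ~(page_size - 1),
                .src = (unsigned long)src,      /* page_size bytes of data */
                .len = page_size,
                .mode = 0,
        };

        /* on success, copy.copy reports the number of bytes installed */
        return ioctl(uffd, UFFDIO_COPY, &copy);
}

static int resolve_with_zeropage(int uffd, unsigned long fault_addr,
                                 unsigned long page_size)
{
        struct uffdio_zeropage zp = {
                .range.start = fault_addr & ~(page_size - 1),
                .range.len = page_size,
                .mode = 0,
        };

        return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
}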
24228d103963SMike Rapoport 
24231da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
242492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
242569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
24261da177e4SLinus Torvalds 
24276d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
24286d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
24296d9d88d0SJarkko Sakkinen #else
24306d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
24316d9d88d0SJarkko Sakkinen #endif
24326d9d88d0SJarkko Sakkinen 
24331da177e4SLinus Torvalds static int
2434800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2435800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2436800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
24371da177e4SLinus Torvalds {
2438800d15a5SNick Piggin 	struct inode *inode = mapping->host;
243940e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
244009cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
244140e041a2SDavid Herrmann 
244240e041a2SDavid Herrmann 	/* i_mutex is held by caller */
24433f472cc9SSteven Rostedt (VMware) 	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
244440e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE)
244540e041a2SDavid Herrmann 			return -EPERM;
244640e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
244740e041a2SDavid Herrmann 			return -EPERM;
244840e041a2SDavid Herrmann 	}
244940e041a2SDavid Herrmann 
24509e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2451800d15a5SNick Piggin }
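
/*
 * Illustrative sketch: the F_SEAL_WRITE / F_SEAL_GROW checks above are what
 * a sealed memfd relies on -- once sealed, write(2) fails with EPERM right
 * here in shmem_write_begin().  MFD_ALLOW_SEALING at creation time is
 * required for F_ADD_SEALS to succeed later.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int demo_sealed_write(void)
{
        int fd = memfd_create("sealed", MFD_CLOEXEC | MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return -1;
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_WRITE) < 0)
                return -1;
        /* shmem_write_begin() now rejects this with -EPERM */
        return write(fd, "x", 1) < 0 ? 0 : -1;
}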
2452800d15a5SNick Piggin 
2453800d15a5SNick Piggin static int
2454800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2455800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2456800d15a5SNick Piggin 			struct page *page, void *fsdata)
2457800d15a5SNick Piggin {
2458800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2459800d15a5SNick Piggin 
2460800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2461800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2462800d15a5SNick Piggin 
2463ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2464800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2465800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2466800d8c63SKirill A. Shutemov 			int i;
2467800d8c63SKirill A. Shutemov 
2468800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2469800d8c63SKirill A. Shutemov 				if (head + i == page)
2470800d8c63SKirill A. Shutemov 					continue;
2471800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2472800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2473800d8c63SKirill A. Shutemov 			}
2474800d8c63SKirill A. Shutemov 		}
247509cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
247609cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2477ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
247809cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2479ec9516fbSHugh Dickins 		}
2480800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2481ec9516fbSHugh Dickins 	}
2482d3602444SHugh Dickins 	set_page_dirty(page);
24836746aff7SWu Fengguang 	unlock_page(page);
248409cbfeafSKirill A. Shutemov 	put_page(page);
2485d3602444SHugh Dickins 
2486800d15a5SNick Piggin 	return copied;
24871da177e4SLinus Torvalds }
24881da177e4SLinus Torvalds 
24892ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
24901da177e4SLinus Torvalds {
24916e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
24926e58e79dSAl Viro 	struct inode *inode = file_inode(file);
24931da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
249441ffe5d5SHugh Dickins 	pgoff_t index;
249541ffe5d5SHugh Dickins 	unsigned long offset;
2496a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2497f7c1d074SGeert Uytterhoeven 	int error = 0;
2498cb66a7a1SAl Viro 	ssize_t retval = 0;
24996e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2500a0ee5ec5SHugh Dickins 
2501a0ee5ec5SHugh Dickins 	/*
2502a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2503a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2504a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2505a0ee5ec5SHugh Dickins 	 */
2506777eda2cSAl Viro 	if (!iter_is_iovec(to))
250775edd345SHugh Dickins 		sgp = SGP_CACHE;
25081da177e4SLinus Torvalds 
250909cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
251009cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
25111da177e4SLinus Torvalds 
25121da177e4SLinus Torvalds 	for (;;) {
25131da177e4SLinus Torvalds 		struct page *page = NULL;
251441ffe5d5SHugh Dickins 		pgoff_t end_index;
251541ffe5d5SHugh Dickins 		unsigned long nr, ret;
25161da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
25171da177e4SLinus Torvalds 
251809cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25191da177e4SLinus Torvalds 		if (index > end_index)
25201da177e4SLinus Torvalds 			break;
25211da177e4SLinus Torvalds 		if (index == end_index) {
252209cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25231da177e4SLinus Torvalds 			if (nr <= offset)
25241da177e4SLinus Torvalds 				break;
25251da177e4SLinus Torvalds 		}
25261da177e4SLinus Torvalds 
25279e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
25286e58e79dSAl Viro 		if (error) {
25296e58e79dSAl Viro 			if (error == -EINVAL)
25306e58e79dSAl Viro 				error = 0;
25311da177e4SLinus Torvalds 			break;
25321da177e4SLinus Torvalds 		}
253375edd345SHugh Dickins 		if (page) {
253475edd345SHugh Dickins 			if (sgp == SGP_CACHE)
253575edd345SHugh Dickins 				set_page_dirty(page);
2536d3602444SHugh Dickins 			unlock_page(page);
253775edd345SHugh Dickins 		}
25381da177e4SLinus Torvalds 
25391da177e4SLinus Torvalds 		/*
25401da177e4SLinus Torvalds 		 * We must evaluate after, since reads (unlike writes)
25411b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate
25421da177e4SLinus Torvalds 		 */
254309cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
25441da177e4SLinus Torvalds 		i_size = i_size_read(inode);
254509cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
25461da177e4SLinus Torvalds 		if (index == end_index) {
254709cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
25481da177e4SLinus Torvalds 			if (nr <= offset) {
25491da177e4SLinus Torvalds 				if (page)
255009cbfeafSKirill A. Shutemov 					put_page(page);
25511da177e4SLinus Torvalds 				break;
25521da177e4SLinus Torvalds 			}
25531da177e4SLinus Torvalds 		}
25541da177e4SLinus Torvalds 		nr -= offset;
25551da177e4SLinus Torvalds 
25561da177e4SLinus Torvalds 		if (page) {
25571da177e4SLinus Torvalds 			/*
25581da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
25591da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
25601da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
25611da177e4SLinus Torvalds 			 */
25621da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
25631da177e4SLinus Torvalds 				flush_dcache_page(page);
25641da177e4SLinus Torvalds 			/*
25651da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
25661da177e4SLinus Torvalds 			 */
25671da177e4SLinus Torvalds 			if (!offset)
25681da177e4SLinus Torvalds 				mark_page_accessed(page);
2569b5810039SNick Piggin 		} else {
25701da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
257109cbfeafSKirill A. Shutemov 			get_page(page);
2572b5810039SNick Piggin 		}
25731da177e4SLinus Torvalds 
25741da177e4SLinus Torvalds 		/*
25751da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
25761da177e4SLinus Torvalds 		 * now we can copy it to user space...
25771da177e4SLinus Torvalds 		 */
25782ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
25796e58e79dSAl Viro 		retval += ret;
25801da177e4SLinus Torvalds 		offset += ret;
258109cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
258209cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
25831da177e4SLinus Torvalds 
258409cbfeafSKirill A. Shutemov 		put_page(page);
25852ba5bbedSAl Viro 		if (!iov_iter_count(to))
25861da177e4SLinus Torvalds 			break;
25876e58e79dSAl Viro 		if (ret < nr) {
25886e58e79dSAl Viro 			error = -EFAULT;
25896e58e79dSAl Viro 			break;
25906e58e79dSAl Viro 		}
25911da177e4SLinus Torvalds 		cond_resched();
25921da177e4SLinus Torvalds 	}
25931da177e4SLinus Torvalds 
259409cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
25956e58e79dSAl Viro 	file_accessed(file);
25966e58e79dSAl Viro 	return retval ? retval : error;
25971da177e4SLinus Torvalds }
25981da177e4SLinus Torvalds 
2599220f2ac9SHugh Dickins /*
26007f4446eeSMatthew Wilcox  * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2601220f2ac9SHugh Dickins  */
2602220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2603965c8e59SAndrew Morton 				    pgoff_t index, pgoff_t end, int whence)
2604220f2ac9SHugh Dickins {
2605220f2ac9SHugh Dickins 	struct page *page;
2606220f2ac9SHugh Dickins 	struct pagevec pvec;
2607220f2ac9SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
2608220f2ac9SHugh Dickins 	bool done = false;
2609220f2ac9SHugh Dickins 	int i;
2610220f2ac9SHugh Dickins 
261186679820SMel Gorman 	pagevec_init(&pvec);
2612220f2ac9SHugh Dickins 	pvec.nr = 1;		/* start small: we may be there already */
2613220f2ac9SHugh Dickins 	while (!done) {
26140cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
2615220f2ac9SHugh Dickins 					pvec.nr, pvec.pages, indices);
2616220f2ac9SHugh Dickins 		if (!pvec.nr) {
2617965c8e59SAndrew Morton 			if (whence == SEEK_DATA)
2618220f2ac9SHugh Dickins 				index = end;
2619220f2ac9SHugh Dickins 			break;
2620220f2ac9SHugh Dickins 		}
2621220f2ac9SHugh Dickins 		for (i = 0; i < pvec.nr; i++, index++) {
2622220f2ac9SHugh Dickins 			if (index < indices[i]) {
2623965c8e59SAndrew Morton 				if (whence == SEEK_HOLE) {
2624220f2ac9SHugh Dickins 					done = true;
2625220f2ac9SHugh Dickins 					break;
2626220f2ac9SHugh Dickins 				}
2627220f2ac9SHugh Dickins 				index = indices[i];
2628220f2ac9SHugh Dickins 			}
2629220f2ac9SHugh Dickins 			page = pvec.pages[i];
26303159f943SMatthew Wilcox 			if (page && !xa_is_value(page)) {
2631220f2ac9SHugh Dickins 				if (!PageUptodate(page))
2632220f2ac9SHugh Dickins 					page = NULL;
2633220f2ac9SHugh Dickins 			}
2634220f2ac9SHugh Dickins 			if (index >= end ||
2635965c8e59SAndrew Morton 			    (page && whence == SEEK_DATA) ||
2636965c8e59SAndrew Morton 			    (!page && whence == SEEK_HOLE)) {
2637220f2ac9SHugh Dickins 				done = true;
2638220f2ac9SHugh Dickins 				break;
2639220f2ac9SHugh Dickins 			}
2640220f2ac9SHugh Dickins 		}
26410cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
2642220f2ac9SHugh Dickins 		pagevec_release(&pvec);
2643220f2ac9SHugh Dickins 		pvec.nr = PAGEVEC_SIZE;
2644220f2ac9SHugh Dickins 		cond_resched();
2645220f2ac9SHugh Dickins 	}
2646220f2ac9SHugh Dickins 	return index;
2647220f2ac9SHugh Dickins }
2648220f2ac9SHugh Dickins 
2649965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2650220f2ac9SHugh Dickins {
2651220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2652220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2653220f2ac9SHugh Dickins 	pgoff_t start, end;
2654220f2ac9SHugh Dickins 	loff_t new_offset;
2655220f2ac9SHugh Dickins 
2656965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2657965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2658220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
26595955102cSAl Viro 	inode_lock(inode);
2660220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
2661220f2ac9SHugh Dickins 
26621a413646SYufen Yu 	if (offset < 0 || offset >= inode->i_size)
2663220f2ac9SHugh Dickins 		offset = -ENXIO;
2664220f2ac9SHugh Dickins 	else {
266509cbfeafSKirill A. Shutemov 		start = offset >> PAGE_SHIFT;
266609cbfeafSKirill A. Shutemov 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2667965c8e59SAndrew Morton 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
266809cbfeafSKirill A. Shutemov 		new_offset <<= PAGE_SHIFT;
2669220f2ac9SHugh Dickins 		if (new_offset > offset) {
2670220f2ac9SHugh Dickins 			if (new_offset < inode->i_size)
2671220f2ac9SHugh Dickins 				offset = new_offset;
2672965c8e59SAndrew Morton 			else if (whence == SEEK_DATA)
2673220f2ac9SHugh Dickins 				offset = -ENXIO;
2674220f2ac9SHugh Dickins 			else
2675220f2ac9SHugh Dickins 				offset = inode->i_size;
2676220f2ac9SHugh Dickins 		}
2677220f2ac9SHugh Dickins 	}
2678220f2ac9SHugh Dickins 
2679387aae6fSHugh Dickins 	if (offset >= 0)
268046a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26815955102cSAl Viro 	inode_unlock(inode);
2682220f2ac9SHugh Dickins 	return offset;
2683220f2ac9SHugh Dickins }
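
/*
 * Illustrative sketch: shmem_file_llseek() gives tmpfs real SEEK_DATA /
 * SEEK_HOLE semantics by scanning the page cache above, so a sparse file's
 * extents can be probed without reading it.  lseek() fails with ENXIO once
 * no further data exists.
 */
#include <stdio.h>
#include <unistd.h>

static void dump_extents(int fd, off_t size)
{
        off_t data = 0, hole;

        while ((data = lseek(fd, data, SEEK_DATA)) >= 0 && data < size) {
                /* next hole after data, or EOF (the implicit final hole) */
                hole = lseek(fd, data, SEEK_HOLE);
                printf("data: [%lld, %lld)\n",
                       (long long)data, (long long)hole);
                data = hole;
        }
}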
2684220f2ac9SHugh Dickins 
268583e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
268683e4fa9cSHugh Dickins 							 loff_t len)
268783e4fa9cSHugh Dickins {
2688496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2689e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
269040e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26911aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2692e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2693e2d12e22SHugh Dickins 	int error;
269483e4fa9cSHugh Dickins 
269513ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
269613ace4d0SHugh Dickins 		return -EOPNOTSUPP;
269713ace4d0SHugh Dickins 
26985955102cSAl Viro 	inode_lock(inode);
269983e4fa9cSHugh Dickins 
270083e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
270183e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
270283e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
270383e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
27048e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
270583e4fa9cSHugh Dickins 
270640e041a2SDavid Herrmann 		/* protected by i_mutex */
270740e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE) {
270840e041a2SDavid Herrmann 			error = -EPERM;
270940e041a2SDavid Herrmann 			goto out;
271040e041a2SDavid Herrmann 		}
271140e041a2SDavid Herrmann 
27128e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2713f00cdc6dSHugh Dickins 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2714f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2715f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2716f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2717f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2718f00cdc6dSHugh Dickins 
271983e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
272083e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
272183e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
272283e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
272383e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
27248e205f77SHugh Dickins 
27258e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
27268e205f77SHugh Dickins 		inode->i_private = NULL;
27278e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
27282055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
27298e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
273083e4fa9cSHugh Dickins 		error = 0;
27318e205f77SHugh Dickins 		goto out;
273283e4fa9cSHugh Dickins 	}
273383e4fa9cSHugh Dickins 
2734e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2735e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2736e2d12e22SHugh Dickins 	if (error)
2737e2d12e22SHugh Dickins 		goto out;
2738e2d12e22SHugh Dickins 
273940e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
274040e041a2SDavid Herrmann 		error = -EPERM;
274140e041a2SDavid Herrmann 		goto out;
274240e041a2SDavid Herrmann 	}
274340e041a2SDavid Herrmann 
274409cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
274509cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2746e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2747e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2748e2d12e22SHugh Dickins 		error = -ENOSPC;
2749e2d12e22SHugh Dickins 		goto out;
2750e2d12e22SHugh Dickins 	}
2751e2d12e22SHugh Dickins 
27528e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27531aac1400SHugh Dickins 	shmem_falloc.start = start;
27541aac1400SHugh Dickins 	shmem_falloc.next  = start;
27551aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27561aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27571aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27581aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27591aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27601aac1400SHugh Dickins 
2761e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2762e2d12e22SHugh Dickins 		struct page *page;
2763e2d12e22SHugh Dickins 
2764e2d12e22SHugh Dickins 		/*
2765e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2766e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2767e2d12e22SHugh Dickins 		 */
2768e2d12e22SHugh Dickins 		if (signal_pending(current))
2769e2d12e22SHugh Dickins 			error = -EINTR;
27701aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27711aac1400SHugh Dickins 			error = -ENOMEM;
2772e2d12e22SHugh Dickins 		else
27739e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2774e2d12e22SHugh Dickins 		if (error) {
27751635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
27767f556567SHugh Dickins 			if (index > start) {
27771635f6a7SHugh Dickins 				shmem_undo_range(inode,
277809cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2779b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27807f556567SHugh Dickins 			}
27811aac1400SHugh Dickins 			goto undone;
2782e2d12e22SHugh Dickins 		}
2783e2d12e22SHugh Dickins 
2784e2d12e22SHugh Dickins 		/*
27851aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
27861aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
27871aac1400SHugh Dickins 		 */
27881aac1400SHugh Dickins 		shmem_falloc.next++;
27891aac1400SHugh Dickins 		if (!PageUptodate(page))
27901aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
27911aac1400SHugh Dickins 
27921aac1400SHugh Dickins 		/*
27931635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
27941635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
27951635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2796e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2797e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2798e2d12e22SHugh Dickins 		 */
2799e2d12e22SHugh Dickins 		set_page_dirty(page);
2800e2d12e22SHugh Dickins 		unlock_page(page);
280109cbfeafSKirill A. Shutemov 		put_page(page);
2802e2d12e22SHugh Dickins 		cond_resched();
2803e2d12e22SHugh Dickins 	}
2804e2d12e22SHugh Dickins 
2805e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2806e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2807078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
28081aac1400SHugh Dickins undone:
28091aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
28101aac1400SHugh Dickins 	inode->i_private = NULL;
28111aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2812e2d12e22SHugh Dickins out:
28135955102cSAl Viro 	inode_unlock(inode);
281483e4fa9cSHugh Dickins 	return error;
281583e4fa9cSHugh Dickins }
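
/*
 * Illustrative sketch: punching a hole in a tmpfs file frees the backing
 * pages immediately.  FALLOC_FL_PUNCH_HOLE must be paired with
 * FALLOC_FL_KEEP_SIZE (the VFS enforces this), matching the mode check at
 * the top of shmem_fallocate().
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int punch(int fd, off_t offset, off_t len)
{
        /* concurrent faults on this range wait on shmem_falloc_waitq above */
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         offset, len);
}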
281683e4fa9cSHugh Dickins 
2817726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
28181da177e4SLinus Torvalds {
2819726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
28201da177e4SLinus Torvalds 
28211da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
282209cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
28231da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
28240edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
28251da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
282641ffe5d5SHugh Dickins 		buf->f_bavail =
282741ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
282841ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
28290edd73b3SHugh Dickins 	}
28300edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
28311da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
28321da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
28331da177e4SLinus Torvalds 	}
28341da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
28351da177e4SLinus Torvalds 	return 0;
28361da177e4SLinus Torvalds }
28371da177e4SLinus Torvalds 
28381da177e4SLinus Torvalds /*
28391da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done.
28401da177e4SLinus Torvalds  */
28411da177e4SLinus Torvalds static int
28421a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
28431da177e4SLinus Torvalds {
28440b0a0806SHugh Dickins 	struct inode *inode;
28451da177e4SLinus Torvalds 	int error = -ENOSPC;
28461da177e4SLinus Torvalds 
2847454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
28481da177e4SLinus Torvalds 	if (inode) {
2849feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2850feda821eSChristoph Hellwig 		if (error)
2851feda821eSChristoph Hellwig 			goto out_iput;
28522a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28539d8f13baSMimi Zohar 						     &dentry->d_name,
28546d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2855feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2856feda821eSChristoph Hellwig 			goto out_iput;
285737ec43cdSMimi Zohar 
2858718deb6bSAl Viro 		error = 0;
28591da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2860078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28611da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28621da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28631da177e4SLinus Torvalds 	}
28641da177e4SLinus Torvalds 	return error;
2865feda821eSChristoph Hellwig out_iput:
2866feda821eSChristoph Hellwig 	iput(inode);
2867feda821eSChristoph Hellwig 	return error;
28681da177e4SLinus Torvalds }
28691da177e4SLinus Torvalds 
287060545d0dSAl Viro static int
287160545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
287260545d0dSAl Viro {
287360545d0dSAl Viro 	struct inode *inode;
287460545d0dSAl Viro 	int error = -ENOSPC;
287560545d0dSAl Viro 
287660545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
287760545d0dSAl Viro 	if (inode) {
287860545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
287960545d0dSAl Viro 						     NULL,
288060545d0dSAl Viro 						     shmem_initxattrs, NULL);
2881feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2882feda821eSChristoph Hellwig 			goto out_iput;
2883feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2884feda821eSChristoph Hellwig 		if (error)
2885feda821eSChristoph Hellwig 			goto out_iput;
288660545d0dSAl Viro 		d_tmpfile(dentry, inode);
288760545d0dSAl Viro 	}
288860545d0dSAl Viro 	return error;
2889feda821eSChristoph Hellwig out_iput:
2890feda821eSChristoph Hellwig 	iput(inode);
2891feda821eSChristoph Hellwig 	return error;
289260545d0dSAl Viro }
289360545d0dSAl Viro 
289418bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
28951da177e4SLinus Torvalds {
28961da177e4SLinus Torvalds 	int error;
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
28991da177e4SLinus Torvalds 		return error;
2900d8c76e6fSDave Hansen 	inc_nlink(dir);
29011da177e4SLinus Torvalds 	return 0;
29021da177e4SLinus Torvalds }
29031da177e4SLinus Torvalds 
29044acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2905ebfc3b49SAl Viro 		bool excl)
29061da177e4SLinus Torvalds {
29071da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
29081da177e4SLinus Torvalds }
29091da177e4SLinus Torvalds 
29101da177e4SLinus Torvalds /*
29111da177e4SLinus Torvalds  * Link a file.
29121da177e4SLinus Torvalds  */
29131da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
29141da177e4SLinus Torvalds {
291575c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
291629b00e60SDarrick J. Wong 	int ret = 0;
29171da177e4SLinus Torvalds 
29181da177e4SLinus Torvalds 	/*
29191da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
29201da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
29211da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
29221062af92SDarrick J. Wong 	 * But if an O_TMPFILE file is linked into the tmpfs, the
29231062af92SDarrick J. Wong 	 * first link must skip that, to get the accounting right.
29241da177e4SLinus Torvalds 	 */
29251062af92SDarrick J. Wong 	if (inode->i_nlink) {
29265b04c689SPavel Emelyanov 		ret = shmem_reserve_inode(inode->i_sb);
29275b04c689SPavel Emelyanov 		if (ret)
29285b04c689SPavel Emelyanov 			goto out;
29291062af92SDarrick J. Wong 	}
29301da177e4SLinus Torvalds 
29311da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2932078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2933d8c76e6fSDave Hansen 	inc_nlink(inode);
29347de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
29351da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
29361da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
29375b04c689SPavel Emelyanov out:
29385b04c689SPavel Emelyanov 	return ret;
29391da177e4SLinus Torvalds }
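
/*
 * Illustrative sketch: the i_nlink check above is what lets an O_TMPFILE
 * inode (created unnamed by shmem_tmpfile()) be given a name later without
 * double-charging the inode quota.  The /proc/self/fd re-link below is the
 * unprivileged alternative to AT_EMPTY_PATH, which needs
 * CAP_DAC_READ_SEARCH.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int create_then_name(const char *tmpfs_dir, const char *name)
{
        char path[64];
        int fd = open(tmpfs_dir, O_TMPFILE | O_RDWR, 0600); /* unnamed inode */

        if (fd < 0)
                return -1;
        snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
        /* first link: shmem_link() skips shmem_reserve_inode() */
        return linkat(AT_FDCWD, path, AT_FDCWD, name, AT_SYMLINK_FOLLOW);
}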
29401da177e4SLinus Torvalds 
29411da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
29421da177e4SLinus Torvalds {
294375c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
29441da177e4SLinus Torvalds 
29455b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
29465b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
29471da177e4SLinus Torvalds 
29481da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2949078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
29509a53c3a7SDave Hansen 	drop_nlink(inode);
29511da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
29521da177e4SLinus Torvalds 	return 0;
29531da177e4SLinus Torvalds }
29541da177e4SLinus Torvalds 
29551da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
29561da177e4SLinus Torvalds {
29571da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29581da177e4SLinus Torvalds 		return -ENOTEMPTY;
29591da177e4SLinus Torvalds 
296075c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29619a53c3a7SDave Hansen 	drop_nlink(dir);
29621da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29631da177e4SLinus Torvalds }
29641da177e4SLinus Torvalds 
296537456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
296637456771SMiklos Szeredi {
2967e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
2968e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
296937456771SMiklos Szeredi 
297037456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
297137456771SMiklos Szeredi 		if (old_is_dir) {
297237456771SMiklos Szeredi 			drop_nlink(old_dir);
297337456771SMiklos Szeredi 			inc_nlink(new_dir);
297437456771SMiklos Szeredi 		} else {
297537456771SMiklos Szeredi 			drop_nlink(new_dir);
297637456771SMiklos Szeredi 			inc_nlink(old_dir);
297737456771SMiklos Szeredi 		}
297837456771SMiklos Szeredi 	}
297937456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
298037456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
298175c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
2982078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
298337456771SMiklos Szeredi 
298437456771SMiklos Szeredi 	return 0;
298537456771SMiklos Szeredi }
298637456771SMiklos Szeredi 
298746fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
298846fdb794SMiklos Szeredi {
298946fdb794SMiklos Szeredi 	struct dentry *whiteout;
299046fdb794SMiklos Szeredi 	int error;
299146fdb794SMiklos Szeredi 
299246fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
299346fdb794SMiklos Szeredi 	if (!whiteout)
299446fdb794SMiklos Szeredi 		return -ENOMEM;
299546fdb794SMiklos Szeredi 
299646fdb794SMiklos Szeredi 	error = shmem_mknod(old_dir, whiteout,
299746fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
299846fdb794SMiklos Szeredi 	dput(whiteout);
299946fdb794SMiklos Szeredi 	if (error)
300046fdb794SMiklos Szeredi 		return error;
300146fdb794SMiklos Szeredi 
300246fdb794SMiklos Szeredi 	/*
300346fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
300446fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
300546fdb794SMiklos Szeredi 	 *
300646fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point,
300746fdb794SMiklos Szeredi 	 * not sure which one, but that isn't even important.
300846fdb794SMiklos Szeredi 	 */
300946fdb794SMiklos Szeredi 	d_rehash(whiteout);
301046fdb794SMiklos Szeredi 	return 0;
301146fdb794SMiklos Szeredi }
301246fdb794SMiklos Szeredi 
30131da177e4SLinus Torvalds /*
30141da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
30151da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
30161da177e4SLinus Torvalds  * it exists so that the VFS layer correctly frees it when it
30171da177e4SLinus Torvalds  * gets overwritten.
30181da177e4SLinus Torvalds  */
30193b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
30201da177e4SLinus Torvalds {
302175c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
30221da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
30231da177e4SLinus Torvalds 
302446fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
30253b69ff51SMiklos Szeredi 		return -EINVAL;
30263b69ff51SMiklos Szeredi 
302737456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
302837456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
302937456771SMiklos Szeredi 
30301da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
30311da177e4SLinus Torvalds 		return -ENOTEMPTY;
30321da177e4SLinus Torvalds 
303346fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
303446fdb794SMiklos Szeredi 		int error;
303546fdb794SMiklos Szeredi 
303646fdb794SMiklos Szeredi 		error = shmem_whiteout(old_dir, old_dentry);
303746fdb794SMiklos Szeredi 		if (error)
303846fdb794SMiklos Szeredi 			return error;
303946fdb794SMiklos Szeredi 	}
304046fdb794SMiklos Szeredi 
304175c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
30421da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
3043b928095bSMiklos Szeredi 		if (they_are_dirs) {
304475c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
30459a53c3a7SDave Hansen 			drop_nlink(old_dir);
3046b928095bSMiklos Szeredi 		}
30471da177e4SLinus Torvalds 	} else if (they_are_dirs) {
30489a53c3a7SDave Hansen 		drop_nlink(old_dir);
3049d8c76e6fSDave Hansen 		inc_nlink(new_dir);
30501da177e4SLinus Torvalds 	}
30511da177e4SLinus Torvalds 
30521da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
30531da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
30541da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
30551da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
3056078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30571da177e4SLinus Torvalds 	return 0;
30581da177e4SLinus Torvalds }
30591da177e4SLinus Torvalds 
30601da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
30611da177e4SLinus Torvalds {
30621da177e4SLinus Torvalds 	int error;
30631da177e4SLinus Torvalds 	int len;
30641da177e4SLinus Torvalds 	struct inode *inode;
30659276aad6SHugh Dickins 	struct page *page;
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds 	len = strlen(symname) + 1;
306809cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30691da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30701da177e4SLinus Torvalds 
30710825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30720825a6f9SJoe Perches 				VM_NORESERVE);
30731da177e4SLinus Torvalds 	if (!inode)
30741da177e4SLinus Torvalds 		return -ENOSPC;
30751da177e4SLinus Torvalds 
30769d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30776d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3078570bc1c2SStephen Smalley 	if (error) {
3079570bc1c2SStephen Smalley 		if (error != -EOPNOTSUPP) {
3080570bc1c2SStephen Smalley 			iput(inode);
3081570bc1c2SStephen Smalley 			return error;
3082570bc1c2SStephen Smalley 		}
3083570bc1c2SStephen Smalley 		error = 0;
3084570bc1c2SStephen Smalley 	}
3085570bc1c2SStephen Smalley 
30861da177e4SLinus Torvalds 	inode->i_size = len-1;
308769f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
30883ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
30893ed47db3SAl Viro 		if (!inode->i_link) {
309069f07ec9SHugh Dickins 			iput(inode);
309169f07ec9SHugh Dickins 			return -ENOMEM;
309269f07ec9SHugh Dickins 		}
309369f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
30941da177e4SLinus Torvalds 	} else {
3095e8ecde25SAl Viro 		inode_nohighmem(inode);
30969e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
30971da177e4SLinus Torvalds 		if (error) {
30981da177e4SLinus Torvalds 			iput(inode);
30991da177e4SLinus Torvalds 			return error;
31001da177e4SLinus Torvalds 		}
310114fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
31021da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
310321fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3104ec9516fbSHugh Dickins 		SetPageUptodate(page);
31051da177e4SLinus Torvalds 		set_page_dirty(page);
31066746aff7SWu Fengguang 		unlock_page(page);
310709cbfeafSKirill A. Shutemov 		put_page(page);
31081da177e4SLinus Torvalds 	}
31091da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3110078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
31111da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
31121da177e4SLinus Torvalds 	dget(dentry);
31131da177e4SLinus Torvalds 	return 0;
31141da177e4SLinus Torvalds }
31151da177e4SLinus Torvalds 
3116fceef393SAl Viro static void shmem_put_link(void *arg)
3117fceef393SAl Viro {
3118fceef393SAl Viro 	mark_page_accessed(arg);
3119fceef393SAl Viro 	put_page(arg);
3120fceef393SAl Viro }
3121fceef393SAl Viro 
31226b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3123fceef393SAl Viro 				  struct inode *inode,
3124fceef393SAl Viro 				  struct delayed_call *done)
31251da177e4SLinus Torvalds {
31261da177e4SLinus Torvalds 	struct page *page = NULL;
31276b255391SAl Viro 	int error;
31286a6c9904SAl Viro 	if (!dentry) {
31296a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
31306a6c9904SAl Viro 		if (!page)
31316b255391SAl Viro 			return ERR_PTR(-ECHILD);
31326a6c9904SAl Viro 		if (!PageUptodate(page)) {
31336a6c9904SAl Viro 			put_page(page);
31346a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
31356a6c9904SAl Viro 		}
31366a6c9904SAl Viro 	} else {
31379e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3138680baacbSAl Viro 		if (error)
3139680baacbSAl Viro 			return ERR_PTR(error);
3140d3602444SHugh Dickins 		unlock_page(page);
31411da177e4SLinus Torvalds 	}
3142fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
314321fc61c7SAl Viro 	return page_address(page);
31441da177e4SLinus Torvalds }
31451da177e4SLinus Torvalds 
3146b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3147b09e0fa4SEric Paris /*
3148b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3149b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3150b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3151b09e0fa4SEric Paris  * filesystem level, though.
3152b09e0fa4SEric Paris  */
3153b09e0fa4SEric Paris 
31546d9d88d0SJarkko Sakkinen /*
31556d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
31566d9d88d0SJarkko Sakkinen  */
31576d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
31586d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
31596d9d88d0SJarkko Sakkinen 			    void *fs_info)
31606d9d88d0SJarkko Sakkinen {
31616d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
31626d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
316338f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
31646d9d88d0SJarkko Sakkinen 	size_t len;
31656d9d88d0SJarkko Sakkinen 
31666d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
316738f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
31686d9d88d0SJarkko Sakkinen 		if (!new_xattr)
31696d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31706d9d88d0SJarkko Sakkinen 
31716d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
31726d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
31736d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
31746d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
31756d9d88d0SJarkko Sakkinen 			kfree(new_xattr);
31766d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31776d9d88d0SJarkko Sakkinen 		}
31786d9d88d0SJarkko Sakkinen 
31796d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
31806d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
31816d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
31826d9d88d0SJarkko Sakkinen 		       xattr->name, len);
31836d9d88d0SJarkko Sakkinen 
318438f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
31856d9d88d0SJarkko Sakkinen 	}
31866d9d88d0SJarkko Sakkinen 
31876d9d88d0SJarkko Sakkinen 	return 0;
31886d9d88d0SJarkko Sakkinen }
31896d9d88d0SJarkko Sakkinen 
3190aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3191b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3192b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3193aa7c5241SAndreas Gruenbacher {
3194b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3195aa7c5241SAndreas Gruenbacher 
3196aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3197aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3198aa7c5241SAndreas Gruenbacher }
3199aa7c5241SAndreas Gruenbacher 
3200aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
320159301226SAl Viro 				   struct dentry *unused, struct inode *inode,
320259301226SAl Viro 				   const char *name, const void *value,
320359301226SAl Viro 				   size_t size, int flags)
3204aa7c5241SAndreas Gruenbacher {
320559301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3206aa7c5241SAndreas Gruenbacher 
3207aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3208aa7c5241SAndreas Gruenbacher 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
3209aa7c5241SAndreas Gruenbacher }
3210aa7c5241SAndreas Gruenbacher 
3211aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3212aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3213aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3214aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3215aa7c5241SAndreas Gruenbacher };
3216aa7c5241SAndreas Gruenbacher 
3217aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3218aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3219aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3220aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3221aa7c5241SAndreas Gruenbacher };
3222aa7c5241SAndreas Gruenbacher 
3223b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3224b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3225feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3226feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3227b09e0fa4SEric Paris #endif
3228aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3229aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3230b09e0fa4SEric Paris 	NULL
3231b09e0fa4SEric Paris };
3232b09e0fa4SEric Paris 
3233b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3234b09e0fa4SEric Paris {
323575c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3236786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3237b09e0fa4SEric Paris }
3238b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
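
/*
 * Hedged userspace sketch: on tmpfs, "security." and "trusted." xattrs
 * are routed through the handlers above into the inode's in-memory
 * simple_xattr list (so they vanish at unmount).  The path argument and
 * attribute name are illustrative; "trusted." requires CAP_SYS_ADMIN.
 */
#include <sys/xattr.h>

int shmem_xattr_demo(const char *path)
{
	char buf[64];

	if (setxattr(path, "trusted.demo", "v", 1, XATTR_CREATE))
		return -1;
	return getxattr(path, "trusted.demo", buf, sizeof(buf)) == 1 &&
	       buf[0] == 'v' ? 0 : -1;
}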
3239b09e0fa4SEric Paris 
324069f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
32416b255391SAl Viro 	.get_link	= simple_get_link,
3242b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3243b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3244b09e0fa4SEric Paris #endif
32451da177e4SLinus Torvalds };
32461da177e4SLinus Torvalds 
324792e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
32486b255391SAl Viro 	.get_link	= shmem_get_link,
3249b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3250b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
325139f0247dSAndreas Gruenbacher #endif
3252b09e0fa4SEric Paris };
325339f0247dSAndreas Gruenbacher 
325491828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
325591828a40SDavid M. Grimes {
325691828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
325791828a40SDavid M. Grimes }
325891828a40SDavid M. Grimes 
325991828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
326091828a40SDavid M. Grimes {
326191828a40SDavid M. Grimes 	__u32 *fh = vfh;
326291828a40SDavid M. Grimes 	__u64 inum = fh[2];
326391828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
326491828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
326591828a40SDavid M. Grimes }
326691828a40SDavid M. Grimes 
326712ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
326812ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
326912ba780dSAmir Goldstein {
327012ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
327112ba780dSAmir Goldstein 
327212ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
327312ba780dSAmir Goldstein }
327412ba780dSAmir Goldstein 
327512ba780dSAmir Goldstein 
3276480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3277480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
327891828a40SDavid M. Grimes {
327991828a40SDavid M. Grimes 	struct inode *inode;
3280480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
328135c2a7f4SHugh Dickins 	u64 inum;
328291828a40SDavid M. Grimes 
3283480b116cSChristoph Hellwig 	if (fh_len < 3)
3284480b116cSChristoph Hellwig 		return NULL;
3285480b116cSChristoph Hellwig 
328635c2a7f4SHugh Dickins 	inum = fid->raw[2];
328735c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
328835c2a7f4SHugh Dickins 
3289480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3290480b116cSChristoph Hellwig 			shmem_match, fid->raw);
329191828a40SDavid M. Grimes 	if (inode) {
329212ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
329391828a40SDavid M. Grimes 		iput(inode);
329491828a40SDavid M. Grimes 	}
329591828a40SDavid M. Grimes 
3296480b116cSChristoph Hellwig 	return dentry;
329791828a40SDavid M. Grimes }
329891828a40SDavid M. Grimes 
3299b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3300b0b0382bSAl Viro 				struct inode *parent)
330191828a40SDavid M. Grimes {
33025fe0c237SAneesh Kumar K.V 	if (*len < 3) {
33035fe0c237SAneesh Kumar K.V 		*len = 3;
330494e07a75SNamjae Jeon 		return FILEID_INVALID;
33055fe0c237SAneesh Kumar K.V 	}
330691828a40SDavid M. Grimes 
33071d3382cbSAl Viro 	if (inode_unhashed(inode)) {
330891828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
330991828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
331091828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
331191828a40SDavid M. Grimes 		 * to do it once.
331291828a40SDavid M. Grimes 		 */
331391828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
331491828a40SDavid M. Grimes 		spin_lock(&lock);
33151d3382cbSAl Viro 		if (inode_unhashed(inode))
331691828a40SDavid M. Grimes 			__insert_inode_hash(inode,
331791828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
331891828a40SDavid M. Grimes 		spin_unlock(&lock);
331991828a40SDavid M. Grimes 	}
332091828a40SDavid M. Grimes 
332191828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
332291828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
332391828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
332491828a40SDavid M. Grimes 
332591828a40SDavid M. Grimes 	*len = 3;
332691828a40SDavid M. Grimes 	return 1;
332791828a40SDavid M. Grimes }
332891828a40SDavid M. Grimes 
332939655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
333091828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
333191828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3332480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
333391828a40SDavid M. Grimes };
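
/*
 * Hedged userspace sketch: the export ops above are what make tmpfs
 * files reachable via file handles.  name_to_handle_at(2) returns the
 * 3-word payload built by shmem_encode_fh(): generation, then the low
 * and high 32 bits of the inode number.  Function name is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int shmem_handle_demo(const char *path)
{
	struct file_handle *fh;
	int mount_id, ret = -1;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (!name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0)) {
		printf("type=%d bytes=%u\n", fh->handle_type, fh->handle_bytes);
		ret = 0;
	}
	free(fh);
	return ret;
}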
333491828a40SDavid M. Grimes 
3335680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3336680d794bSakpm@linux-foundation.org 			       bool remount)
33371da177e4SLinus Torvalds {
33381da177e4SLinus Torvalds 	char *this_char, *value, *rest;
333949cd0a5cSGreg Thelen 	struct mempolicy *mpol = NULL;
33408751e039SEric W. Biederman 	uid_t uid;
33418751e039SEric W. Biederman 	gid_t gid;
33421da177e4SLinus Torvalds 
3343b00dc3adSHugh Dickins 	while (options != NULL) {
3344b00dc3adSHugh Dickins 		this_char = options;
3345b00dc3adSHugh Dickins 		for (;;) {
3346b00dc3adSHugh Dickins 			/*
3347b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3348b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3349b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3350b00dc3adSHugh Dickins 			 */
3351b00dc3adSHugh Dickins 			options = strchr(options, ',');
3352b00dc3adSHugh Dickins 			if (options == NULL)
3353b00dc3adSHugh Dickins 				break;
3354b00dc3adSHugh Dickins 			options++;
3355b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3356b00dc3adSHugh Dickins 				options[-1] = '\0';
3357b00dc3adSHugh Dickins 				break;
3358b00dc3adSHugh Dickins 			}
3359b00dc3adSHugh Dickins 		}
33601da177e4SLinus Torvalds 		if (!*this_char)
33611da177e4SLinus Torvalds 			continue;
33621da177e4SLinus Torvalds 		if ((value = strchr(this_char,'=')) != NULL) {
33631da177e4SLinus Torvalds 			*value++ = 0;
33641da177e4SLinus Torvalds 		} else {
33651170532bSJoe Perches 			pr_err("tmpfs: No value for mount option '%s'\n",
33661da177e4SLinus Torvalds 			       this_char);
336749cd0a5cSGreg Thelen 			goto error;
33681da177e4SLinus Torvalds 		}
33691da177e4SLinus Torvalds 
33701da177e4SLinus Torvalds 		if (!strcmp(this_char,"size")) {
33711da177e4SLinus Torvalds 			unsigned long long size;
33721da177e4SLinus Torvalds 			size = memparse(value,&rest);
33731da177e4SLinus Torvalds 			if (*rest == '%') {
33741da177e4SLinus Torvalds 				size <<= PAGE_SHIFT;
3375ca79b0c2SArun KS 				size *= totalram_pages();
33761da177e4SLinus Torvalds 				do_div(size, 100);
33771da177e4SLinus Torvalds 				rest++;
33781da177e4SLinus Torvalds 			}
33791da177e4SLinus Torvalds 			if (*rest)
33801da177e4SLinus Torvalds 				goto bad_val;
3381680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks =
338209cbfeafSKirill A. Shutemov 				DIV_ROUND_UP(size, PAGE_SIZE);
33831da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_blocks")) {
3384680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks = memparse(value, &rest);
33851da177e4SLinus Torvalds 			if (*rest)
33861da177e4SLinus Torvalds 				goto bad_val;
33871da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_inodes")) {
3388680d794bSakpm@linux-foundation.org 			sbinfo->max_inodes = memparse(value, &rest);
33891da177e4SLinus Torvalds 			if (*rest)
33901da177e4SLinus Torvalds 				goto bad_val;
33911da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"mode")) {
3392680d794bSakpm@linux-foundation.org 			if (remount)
33931da177e4SLinus Torvalds 				continue;
3394680d794bSakpm@linux-foundation.org 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
33951da177e4SLinus Torvalds 			if (*rest)
33961da177e4SLinus Torvalds 				goto bad_val;
33971da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"uid")) {
3398680d794bSakpm@linux-foundation.org 			if (remount)
33991da177e4SLinus Torvalds 				continue;
34008751e039SEric W. Biederman 			uid = simple_strtoul(value, &rest, 0);
34011da177e4SLinus Torvalds 			if (*rest)
34021da177e4SLinus Torvalds 				goto bad_val;
34038751e039SEric W. Biederman 			sbinfo->uid = make_kuid(current_user_ns(), uid);
34048751e039SEric W. Biederman 			if (!uid_valid(sbinfo->uid))
34058751e039SEric W. Biederman 				goto bad_val;
34061da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"gid")) {
3407680d794bSakpm@linux-foundation.org 			if (remount)
34081da177e4SLinus Torvalds 				continue;
34098751e039SEric W. Biederman 			gid = simple_strtoul(value, &rest, 0);
34101da177e4SLinus Torvalds 			if (*rest)
34111da177e4SLinus Torvalds 				goto bad_val;
34128751e039SEric W. Biederman 			sbinfo->gid = make_kgid(current_user_ns(), gid);
34138751e039SEric W. Biederman 			if (!gid_valid(sbinfo->gid))
34148751e039SEric W. Biederman 				goto bad_val;
3415e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
34165a6e75f8SKirill A. Shutemov 		} else if (!strcmp(this_char, "huge")) {
34175a6e75f8SKirill A. Shutemov 			int huge;
34185a6e75f8SKirill A. Shutemov 			huge = shmem_parse_huge(value);
34195a6e75f8SKirill A. Shutemov 			if (huge < 0)
34205a6e75f8SKirill A. Shutemov 				goto bad_val;
34215a6e75f8SKirill A. Shutemov 			if (!has_transparent_hugepage() &&
34225a6e75f8SKirill A. Shutemov 					huge != SHMEM_HUGE_NEVER)
34235a6e75f8SKirill A. Shutemov 				goto bad_val;
34245a6e75f8SKirill A. Shutemov 			sbinfo->huge = huge;
34255a6e75f8SKirill A. Shutemov #endif
34265a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA
34277339ff83SRobin Holt 		} else if (!strcmp(this_char,"mpol")) {
342849cd0a5cSGreg Thelen 			mpol_put(mpol);
342949cd0a5cSGreg Thelen 			mpol = NULL;
343049cd0a5cSGreg Thelen 			if (mpol_parse_str(value, &mpol))
34317339ff83SRobin Holt 				goto bad_val;
34325a6e75f8SKirill A. Shutemov #endif
34331da177e4SLinus Torvalds 		} else {
34341170532bSJoe Perches 			pr_err("tmpfs: Bad mount option %s\n", this_char);
343549cd0a5cSGreg Thelen 			goto error;
34361da177e4SLinus Torvalds 		}
34371da177e4SLinus Torvalds 	}
343849cd0a5cSGreg Thelen 	sbinfo->mpol = mpol;
34391da177e4SLinus Torvalds 	return 0;
34401da177e4SLinus Torvalds 
34411da177e4SLinus Torvalds bad_val:
34421170532bSJoe Perches 	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
34431da177e4SLinus Torvalds 	       value, this_char);
344449cd0a5cSGreg Thelen error:
344549cd0a5cSGreg Thelen 	mpol_put(mpol);
34461da177e4SLinus Torvalds 	return 1;
34471da177e4SLinus Torvalds 
34481da177e4SLinus Torvalds }
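
/*
 * Example of the accepted option grammar (values illustrative):
 *
 *   mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,uid=1000 tmpfs /mnt
 *   mount -t tmpfs -o size=1g,mpol=bind:0,2 tmpfs /mnt
 *
 * The second line shows why the loop above cannot split on every comma:
 * in "mpol=bind:0,2" the comma is followed by a digit, so it is kept as
 * part of the mpol nodelist rather than treated as an option separator.
 */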
34491da177e4SLinus Torvalds 
34501da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
34511da177e4SLinus Torvalds {
34521da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3453680d794bSakpm@linux-foundation.org 	struct shmem_sb_info config = *sbinfo;
34540edd73b3SHugh Dickins 	unsigned long inodes;
34550edd73b3SHugh Dickins 	int error = -EINVAL;
34561da177e4SLinus Torvalds 
34575f00110fSGreg Thelen 	config.mpol = NULL;
3458680d794bSakpm@linux-foundation.org 	if (shmem_parse_options(data, &config, true))
34590edd73b3SHugh Dickins 		return error;
34600edd73b3SHugh Dickins 
34610edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
34620edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
34637e496299STim Chen 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
34640edd73b3SHugh Dickins 		goto out;
3465680d794bSakpm@linux-foundation.org 	if (config.max_inodes < inodes)
34660edd73b3SHugh Dickins 		goto out;
34670edd73b3SHugh Dickins 	/*
346854af6042SHugh Dickins 	 * Those tests disallow limited->unlimited while any are in use;
34690edd73b3SHugh Dickins 	 * but we must separately disallow unlimited->limited, because
34700edd73b3SHugh Dickins 	 * in that case we have no record of how much is already in use.
34710edd73b3SHugh Dickins 	 */
3472680d794bSakpm@linux-foundation.org 	if (config.max_blocks && !sbinfo->max_blocks)
34730edd73b3SHugh Dickins 		goto out;
3474680d794bSakpm@linux-foundation.org 	if (config.max_inodes && !sbinfo->max_inodes)
34750edd73b3SHugh Dickins 		goto out;
34760edd73b3SHugh Dickins 
34770edd73b3SHugh Dickins 	error = 0;
34785a6e75f8SKirill A. Shutemov 	sbinfo->huge = config.huge;
3479680d794bSakpm@linux-foundation.org 	sbinfo->max_blocks  = config.max_blocks;
3480680d794bSakpm@linux-foundation.org 	sbinfo->max_inodes  = config.max_inodes;
3481680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = config.max_inodes - inodes;
348271fe804bSLee Schermerhorn 
34835f00110fSGreg Thelen 	/*
34845f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
34855f00110fSGreg Thelen 	 */
34865f00110fSGreg Thelen 	if (config.mpol) {
348771fe804bSLee Schermerhorn 		mpol_put(sbinfo->mpol);
348871fe804bSLee Schermerhorn 		sbinfo->mpol = config.mpol;	/* transfers initial ref */
34895f00110fSGreg Thelen 	}
34900edd73b3SHugh Dickins out:
34910edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
34920edd73b3SHugh Dickins 	return error;
34931da177e4SLinus Torvalds }
3494680d794bSakpm@linux-foundation.org 
349534c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3496680d794bSakpm@linux-foundation.org {
349734c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3498680d794bSakpm@linux-foundation.org 
3499680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3500680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
350109cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3502680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3503680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
35040825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
350509208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
35068751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
35078751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
35088751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
35098751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
35108751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
35118751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3512e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
35135a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
35145a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
35155a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
35165a6e75f8SKirill A. Shutemov #endif
351771fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3518680d794bSakpm@linux-foundation.org 	return 0;
3519680d794bSakpm@linux-foundation.org }
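
/*
 * For a non-default mount, the function above contributes an option
 * suffix to /proc/mounts along the lines of (values illustrative):
 *
 *   ,size=1048576k,nr_inodes=1024,mode=700,uid=1000,gid=1000
 */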
35209183df25SDavid Herrmann 
3521680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
35221da177e4SLinus Torvalds 
35231da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
35241da177e4SLinus Torvalds {
3525602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3526602586a8SHugh Dickins 
3527602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
352849cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3529602586a8SHugh Dickins 	kfree(sbinfo);
35301da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
35311da177e4SLinus Torvalds }
35321da177e4SLinus Torvalds 
35332b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent)
35341da177e4SLinus Torvalds {
35351da177e4SLinus Torvalds 	struct inode *inode;
35360edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3537680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3538680d794bSakpm@linux-foundation.org 
3539680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3540425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3541680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3542680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3543680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3544680d794bSakpm@linux-foundation.org 
35450825a6f9SJoe Perches 	sbinfo->mode = 0777 | S_ISVTX;
354676aac0e9SDavid Howells 	sbinfo->uid = current_fsuid();
354776aac0e9SDavid Howells 	sbinfo->gid = current_fsgid();
3548680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
35491da177e4SLinus Torvalds 
35500edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
35511da177e4SLinus Torvalds 	/*
35521da177e4SLinus Torvalds 	 * By default we allow only half of the physical RAM per
35531da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
35541da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
35551da177e4SLinus Torvalds 	 */
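	/*
	 * Worked example (illustrative): with 8 GiB of RAM, 4 KiB pages
	 * and no highmem, the defaults below come out to 1048576 blocks
	 * (4 GiB) and 1048576 inodes.
	 */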
35561751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3557680d794bSakpm@linux-foundation.org 		sbinfo->max_blocks = shmem_default_max_blocks();
3558680d794bSakpm@linux-foundation.org 		sbinfo->max_inodes = shmem_default_max_inodes();
3559680d794bSakpm@linux-foundation.org 		if (shmem_parse_options(data, sbinfo, false)) {
3560680d794bSakpm@linux-foundation.org 			err = -EINVAL;
3561680d794bSakpm@linux-foundation.org 			goto failed;
3562680d794bSakpm@linux-foundation.org 		}
3563ca4e0519SAl Viro 	} else {
35641751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
35651da177e4SLinus Torvalds 	}
356691828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
35671751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
35680edd73b3SHugh Dickins #else
35691751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
35700edd73b3SHugh Dickins #endif
35711da177e4SLinus Torvalds 
35721da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
3573908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3574602586a8SHugh Dickins 		goto failed;
3575680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = sbinfo->max_inodes;
3576779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3577779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
35781da177e4SLinus Torvalds 
3579285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
358009cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
358109cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
35821da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
35831da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3584cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3585b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
358639f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3587b09e0fa4SEric Paris #endif
3588b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
35891751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
359039f0247dSAndreas Gruenbacher #endif
35912b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
35920edd73b3SHugh Dickins 
3593454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
35941da177e4SLinus Torvalds 	if (!inode)
35951da177e4SLinus Torvalds 		goto failed;
3596680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3597680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3598318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3599318ceed0SAl Viro 	if (!sb->s_root)
360048fde701SAl Viro 		goto failed;
36011da177e4SLinus Torvalds 	return 0;
36021da177e4SLinus Torvalds 
36031da177e4SLinus Torvalds failed:
36041da177e4SLinus Torvalds 	shmem_put_super(sb);
36051da177e4SLinus Torvalds 	return err;
36061da177e4SLinus Torvalds }
36071da177e4SLinus Torvalds 
3608fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
36091da177e4SLinus Torvalds 
36101da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
36111da177e4SLinus Torvalds {
361241ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
361341ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
361441ffe5d5SHugh Dickins 	if (!info)
36151da177e4SLinus Torvalds 		return NULL;
361641ffe5d5SHugh Dickins 	return &info->vfs_inode;
36171da177e4SLinus Torvalds }
36181da177e4SLinus Torvalds 
361941ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head)
3620fa0d7e3dSNick Piggin {
3621fa0d7e3dSNick Piggin 	struct inode *inode = container_of(head, struct inode, i_rcu);
362284e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
36233ed47db3SAl Viro 		kfree(inode->i_link);
3624fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3625fa0d7e3dSNick Piggin }
3626fa0d7e3dSNick Piggin 
36271da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
36281da177e4SLinus Torvalds {
362909208d15SAl Viro 	if (S_ISREG(inode->i_mode))
36301da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
363141ffe5d5SHugh Dickins 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
36321da177e4SLinus Torvalds }
36331da177e4SLinus Torvalds 
363441ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
36351da177e4SLinus Torvalds {
363641ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
363741ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
36381da177e4SLinus Torvalds }
36391da177e4SLinus Torvalds 
36409a8ec03eSweiping zhang static void shmem_init_inodecache(void)
36411da177e4SLinus Torvalds {
36421da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
36431da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
36445d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
36451da177e4SLinus Torvalds }
36461da177e4SLinus Torvalds 
364741ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
36481da177e4SLinus Torvalds {
36491a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
36501da177e4SLinus Torvalds }
36511da177e4SLinus Torvalds 
3652f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
36531da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
365476719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
36551da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3656800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3657800d15a5SNick Piggin 	.write_end	= shmem_write_end,
36581da177e4SLinus Torvalds #endif
36591c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3660304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
36611c93923cSAndrew Morton #endif
3662aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
36631da177e4SLinus Torvalds };
36641da177e4SLinus Torvalds 
366515ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
36661da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3667c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
36681da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3669220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
36702ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
36718174202bSAl Viro 	.write_iter	= generic_file_write_iter,
36721b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
367382c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3674f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
367583e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
36761da177e4SLinus Torvalds #endif
36771da177e4SLinus Torvalds };
36781da177e4SLinus Torvalds 
367992e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
368044a30220SYu Zhao 	.getattr	= shmem_getattr,
368194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3682b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3683b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3684feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3685b09e0fa4SEric Paris #endif
36861da177e4SLinus Torvalds };
36871da177e4SLinus Torvalds 
368892e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
36891da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
36901da177e4SLinus Torvalds 	.create		= shmem_create,
36911da177e4SLinus Torvalds 	.lookup		= simple_lookup,
36921da177e4SLinus Torvalds 	.link		= shmem_link,
36931da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
36941da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
36951da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
36961da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
36971da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
36982773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
369960545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
37001da177e4SLinus Torvalds #endif
3701b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3702b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3703b09e0fa4SEric Paris #endif
370439f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
370594c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3706feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
370739f0247dSAndreas Gruenbacher #endif
370839f0247dSAndreas Gruenbacher };
370939f0247dSAndreas Gruenbacher 
371092e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3711b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3712b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3713b09e0fa4SEric Paris #endif
371439f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
371594c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3716feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
371739f0247dSAndreas Gruenbacher #endif
37181da177e4SLinus Torvalds };
37191da177e4SLinus Torvalds 
3720759b9775SHugh Dickins static const struct super_operations shmem_ops = {
37211da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
37221da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
37231da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
37241da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
37251da177e4SLinus Torvalds 	.remount_fs	= shmem_remount_fs,
3726680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
37271da177e4SLinus Torvalds #endif
37281f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
37291da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
37301da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3731779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3732779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3733779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3734779750d2SKirill A. Shutemov #endif
37351da177e4SLinus Torvalds };
37361da177e4SLinus Torvalds 
3737f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
373854cb8821SNick Piggin 	.fault		= shmem_fault,
3739d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
37401da177e4SLinus Torvalds #ifdef CONFIG_NUMA
37411da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
37421da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
37431da177e4SLinus Torvalds #endif
37441da177e4SLinus Torvalds };
37451da177e4SLinus Torvalds 
37463c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type,
37473c26ff6eSAl Viro 	int flags, const char *dev_name, void *data)
37481da177e4SLinus Torvalds {
37493c26ff6eSAl Viro 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
37501da177e4SLinus Torvalds }
37511da177e4SLinus Torvalds 
375241ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
37531da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
37541da177e4SLinus Torvalds 	.name		= "tmpfs",
37553c26ff6eSAl Viro 	.mount		= shmem_mount,
37561da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
37572b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
37581da177e4SLinus Torvalds };
37591da177e4SLinus Torvalds 
376041ffe5d5SHugh Dickins int __init shmem_init(void)
37611da177e4SLinus Torvalds {
37621da177e4SLinus Torvalds 	int error;
37631da177e4SLinus Torvalds 
376416203a7aSRob Landley 	/* If rootfs called this, don't re-init */
376516203a7aSRob Landley 	if (shmem_inode_cachep)
376616203a7aSRob Landley 		return 0;
376716203a7aSRob Landley 
37689a8ec03eSweiping zhang 	shmem_init_inodecache();
37691da177e4SLinus Torvalds 
377041ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
37711da177e4SLinus Torvalds 	if (error) {
37721170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
37731da177e4SLinus Torvalds 		goto out2;
37741da177e4SLinus Torvalds 	}
377595dc112aSGreg Kroah-Hartman 
3776ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
37771da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
37781da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
37791170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
37801da177e4SLinus Torvalds 		goto out1;
37811da177e4SLinus Torvalds 	}
37825a6e75f8SKirill A. Shutemov 
3783e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3784435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
37855a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
37865a6e75f8SKirill A. Shutemov 	else
37875a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
37885a6e75f8SKirill A. Shutemov #endif
37891da177e4SLinus Torvalds 	return 0;
37901da177e4SLinus Torvalds 
37911da177e4SLinus Torvalds out1:
379241ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
37931da177e4SLinus Torvalds out2:
379441ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
37951da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
37961da177e4SLinus Torvalds 	return error;
37971da177e4SLinus Torvalds }
3798853ac43aSMatt Mackall 
3799e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
38005a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
38015a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
38025a6e75f8SKirill A. Shutemov {
38035a6e75f8SKirill A. Shutemov 	int values[] = {
38045a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
38055a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
38065a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
38075a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
38085a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
38095a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
38105a6e75f8SKirill A. Shutemov 	};
38115a6e75f8SKirill A. Shutemov 	int i, count;
38125a6e75f8SKirill A. Shutemov 
38135a6e75f8SKirill A. Shutemov 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
38145a6e75f8SKirill A. Shutemov 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
38155a6e75f8SKirill A. Shutemov 
38165a6e75f8SKirill A. Shutemov 		count += sprintf(buf + count, fmt,
38175a6e75f8SKirill A. Shutemov 				shmem_format_huge(values[i]));
38185a6e75f8SKirill A. Shutemov 	}
38195a6e75f8SKirill A. Shutemov 	buf[count - 1] = '\n';
38205a6e75f8SKirill A. Shutemov 	return count;
38215a6e75f8SKirill A. Shutemov }
38225a6e75f8SKirill A. Shutemov 
38235a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
38245a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
38255a6e75f8SKirill A. Shutemov {
38265a6e75f8SKirill A. Shutemov 	char tmp[16];
38275a6e75f8SKirill A. Shutemov 	int huge;
38285a6e75f8SKirill A. Shutemov 
38295a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
38305a6e75f8SKirill A. Shutemov 		return -EINVAL;
38315a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
38325a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
38335a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
38345a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
38355a6e75f8SKirill A. Shutemov 
38365a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
38375a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
38385a6e75f8SKirill A. Shutemov 		return -EINVAL;
38395a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
38405a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
38415a6e75f8SKirill A. Shutemov 		return -EINVAL;
38425a6e75f8SKirill A. Shutemov 
38435a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
3844435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
38455a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
38465a6e75f8SKirill A. Shutemov 	return count;
38475a6e75f8SKirill A. Shutemov }
38485a6e75f8SKirill A. Shutemov 
38495a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
38505a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
38513b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
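
/*
 * Userspace view (illustrative): this attribute appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, e.g.
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *   always within_size advise [never] deny force
 *   $ echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */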
3852f3f0e1d2SKirill A. Shutemov 
38533b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3854f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
3855f3f0e1d2SKirill A. Shutemov {
3856f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
3857f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3858f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
3859f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
3860f3f0e1d2SKirill A. Shutemov 
3861f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
3862f3f0e1d2SKirill A. Shutemov 		return true;
3863f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
3864f3f0e1d2SKirill A. Shutemov 		return false;
3865f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
3866f3f0e1d2SKirill A. Shutemov 	case SHMEM_HUGE_NEVER:
3867f3f0e1d2SKirill A. Shutemov 		return false;
3868f3f0e1d2SKirill A. Shutemov 	case SHMEM_HUGE_ALWAYS:
3869f3f0e1d2SKirill A. Shutemov 		return true;
3870f3f0e1d2SKirill A. Shutemov 	case SHMEM_HUGE_WITHIN_SIZE:
3871f3f0e1d2SKirill A. Shutemov 		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3872f3f0e1d2SKirill A. Shutemov 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
3873f3f0e1d2SKirill A. Shutemov 		if (i_size >= HPAGE_PMD_SIZE &&
3874f3f0e1d2SKirill A. Shutemov 		    i_size >> PAGE_SHIFT >= off)
3875f3f0e1d2SKirill A. Shutemov 			return true;
3876c8402871SGustavo A. R. Silva 		/* fall through */
3877f3f0e1d2SKirill A. Shutemov 	case SHMEM_HUGE_ADVISE:
3878f3f0e1d2SKirill A. Shutemov 		/* TODO: implement fadvise() hints */
3879f3f0e1d2SKirill A. Shutemov 		return (vma->vm_flags & VM_HUGEPAGE);
3880f3f0e1d2SKirill A. Shutemov 	default:
3881f3f0e1d2SKirill A. Shutemov 		VM_BUG_ON(1);
3882f3f0e1d2SKirill A. Shutemov 		return false;
3883f3f0e1d2SKirill A. Shutemov 	}
3884f3f0e1d2SKirill A. Shutemov }
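
/*
 * Worked example for SHMEM_HUGE_WITHIN_SIZE above (x86-64, 2 MiB PMD,
 * numbers illustrative): mapping a 3 MiB file at vm_pgoff 0 gives
 * off = 0 and i_size = 3 MiB, so i_size >= HPAGE_PMD_SIZE and
 * i_size >> PAGE_SHIFT (768) >= off both hold, and huge pages are
 * allowed.
 */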
38853b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
38865a6e75f8SKirill A. Shutemov 
3887853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
3888853ac43aSMatt Mackall 
3889853ac43aSMatt Mackall /*
3890853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3891853ac43aSMatt Mackall  *
3892853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
3893853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
3894853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
3895853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
3896853ac43aSMatt Mackall  */
3897853ac43aSMatt Mackall 
389841ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
3899853ac43aSMatt Mackall 	.name		= "tmpfs",
39003c26ff6eSAl Viro 	.mount		= ramfs_mount,
3901853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
39022b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
3903853ac43aSMatt Mackall };
3904853ac43aSMatt Mackall 
390541ffe5d5SHugh Dickins int __init shmem_init(void)
3906853ac43aSMatt Mackall {
390741ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3908853ac43aSMatt Mackall 
390941ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
3910853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
3911853ac43aSMatt Mackall 
3912853ac43aSMatt Mackall 	return 0;
3913853ac43aSMatt Mackall }
3914853ac43aSMatt Mackall 
3915*b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
3916*b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
3917853ac43aSMatt Mackall {
3918853ac43aSMatt Mackall 	return 0;
3919853ac43aSMatt Mackall }
3920853ac43aSMatt Mackall 
39213f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
39223f96b79aSHugh Dickins {
39233f96b79aSHugh Dickins 	return 0;
39243f96b79aSHugh Dickins }
39253f96b79aSHugh Dickins 
392624513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
392724513264SHugh Dickins {
392824513264SHugh Dickins }
392924513264SHugh Dickins 
3930c01d5b30SHugh Dickins #ifdef CONFIG_MMU
3931c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
3932c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
3933c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
3934c01d5b30SHugh Dickins {
3935c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
3936c01d5b30SHugh Dickins }
3937c01d5b30SHugh Dickins #endif
3938c01d5b30SHugh Dickins 
393941ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
394094c1e62dSHugh Dickins {
394141ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
394294c1e62dSHugh Dickins }
394394c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
394494c1e62dSHugh Dickins 
3945853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
39460b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
3947454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
39480b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
39490b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
3950853ac43aSMatt Mackall 
3951853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
3952853ac43aSMatt Mackall 
3953853ac43aSMatt Mackall /* common code */
39541da177e4SLinus Torvalds 
3955703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
3956c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
39571da177e4SLinus Torvalds {
39581da177e4SLinus Torvalds 	struct inode *inode;
395993dec2daSAl Viro 	struct file *res;
39601da177e4SLinus Torvalds 
3961703321b6SMatthew Auld 	if (IS_ERR(mnt))
3962703321b6SMatthew Auld 		return ERR_CAST(mnt);
39631da177e4SLinus Torvalds 
3964285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
39651da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
39661da177e4SLinus Torvalds 
39671da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
39681da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
39691da177e4SLinus Torvalds 
397093dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
397193dec2daSAl Viro 				flags);
3972dac2d1f6SAl Viro 	if (unlikely(!inode)) {
3973dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
3974dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
3975dac2d1f6SAl Viro 	}
3976c7277090SEric Paris 	inode->i_flags |= i_flags;
39771da177e4SLinus Torvalds 	inode->i_size = size;
39786d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
397926567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
398093dec2daSAl Viro 	if (!IS_ERR(res))
398193dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
39824b42af81SAl Viro 				&shmem_file_operations);
39836b4d0b27SAl Viro 	if (IS_ERR(res))
398493dec2daSAl Viro 		iput(inode);
39856b4d0b27SAl Viro 	return res;
39861da177e4SLinus Torvalds }
3987c7277090SEric Paris 
3988c7277090SEric Paris /**
3989c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs, for
3990c7277090SEric Paris  * 	kernel-internal use.  There will be NO LSM permission checks against
3991c7277090SEric Paris  * 	the underlying inode.  So users of this interface must do LSM checks
3992e1832f29SStephen Smalley  *	at a higher layer.  The users are the big_key and shm implementations.
3993e1832f29SStephen Smalley  *	LSM checks are provided at the key or shm level rather than the inode.
3994c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3995c7277090SEric Paris  * @size: size to be set for the file
3996c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3997c7277090SEric Paris  */
3998c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3999c7277090SEric Paris {
4000703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4001c7277090SEric Paris }
4002c7277090SEric Paris 
4003c7277090SEric Paris /**
4004c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4005c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4006c7277090SEric Paris  * @size: size to be set for the file
4007c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4008c7277090SEric Paris  */
4009c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4010c7277090SEric Paris {
4011703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4012c7277090SEric Paris }
4013395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
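
/*
 * Minimal in-kernel usage sketch (assumes this file's context; the
 * function name is illustrative and error handling is trimmed): create
 * an unlinked 1 MiB tmpfs file, then use it through the usual file APIs.
 */
static int __maybe_unused shmem_setup_demo(void)
{
	struct file *file;

	file = shmem_file_setup("demo", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... read/write via file->f_mapping or kernel_read() ... */
	fput(file);
	return 0;
}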
40141da177e4SLinus Torvalds 
401546711810SRandy Dunlap /**
4016703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4017703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4018703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4019703321b6SMatthew Auld  * @size: size to be set for the file
4020703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4021703321b6SMatthew Auld  */
4022703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4023703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4024703321b6SMatthew Auld {
4025703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4026703321b6SMatthew Auld }
4027703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4028703321b6SMatthew Auld 
4029703321b6SMatthew Auld /**
40301da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
40311da177e4SLinus Torvalds  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
40321da177e4SLinus Torvalds  */
40331da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
40341da177e4SLinus Torvalds {
40351da177e4SLinus Torvalds 	struct file *file;
40361da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
40371da177e4SLinus Torvalds 
403866fc1303SHugh Dickins 	/*
403966fc1303SHugh Dickins 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
404066fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
404166fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
404266fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
404366fc1303SHugh Dickins 	 */
4044703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
40451da177e4SLinus Torvalds 	if (IS_ERR(file))
40461da177e4SLinus Torvalds 		return PTR_ERR(file);
40471da177e4SLinus Torvalds 
40481da177e4SLinus Torvalds 	if (vma->vm_file)
40491da177e4SLinus Torvalds 		fput(vma->vm_file);
40501da177e4SLinus Torvalds 	vma->vm_file = file;
40511da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4052f3f0e1d2SKirill A. Shutemov 
4053e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4054f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4055f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
4056f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4057f3f0e1d2SKirill A. Shutemov 	}
4058f3f0e1d2SKirill A. Shutemov 
40591da177e4SLinus Torvalds 	return 0;
40601da177e4SLinus Torvalds }
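
/*
 * Userspace counterpart (illustrative): a shared anonymous mapping is
 * exactly what arrives at shmem_zero_setup() above via do_mmap_pgoff().
 */
#include <sys/mman.h>

void *shared_zero_demo(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}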
4061d9d90e5eSHugh Dickins 
4062d9d90e5eSHugh Dickins /**
4063d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4064d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4065d9d90e5eSHugh Dickins  * @index:	the page index
4066d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4067d9d90e5eSHugh Dickins  *
4068d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4069d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4070d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4071d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4072d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4073d9d90e5eSHugh Dickins  *
407468da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
407568da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4076d9d90e5eSHugh Dickins  */
4077d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4078d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4079d9d90e5eSHugh Dickins {
408068da9f05SHugh Dickins #ifdef CONFIG_SHMEM
408168da9f05SHugh Dickins 	struct inode *inode = mapping->host;
40829276aad6SHugh Dickins 	struct page *page;
408368da9f05SHugh Dickins 	int error;
408468da9f05SHugh Dickins 
408568da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
40869e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4087cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
408868da9f05SHugh Dickins 	if (error)
408968da9f05SHugh Dickins 		page = ERR_PTR(error);
409068da9f05SHugh Dickins 	else
409168da9f05SHugh Dickins 		unlock_page(page);
409268da9f05SHugh Dickins 	return page;
409368da9f05SHugh Dickins #else
409468da9f05SHugh Dickins 	/*
409568da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
409668da9f05SHugh Dickins 	 */
4097d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
409868da9f05SHugh Dickins #endif
4099d9d90e5eSHugh Dickins }
4100d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
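
/*
 * Caller sketch in the pattern the comment above attributes to i915
 * (function name illustrative): mix __GFP_NORETRY | __GFP_NOWARN into
 * the mapping's gfp mask so allocation failure falls back gracefully.
 */
static struct page *__maybe_unused shmem_demo_get_page(
		struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}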
4101