/* mm/shmem.c — xref revision 62f945b6a7b8cda6d1f35941eb374276f7b8749a */
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
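
/*
 * Note on the two macros above: inode->i_blocks is accounted in 512-byte
 * units, so BLOCKS_PER_PAGE converts a page count into i_blocks units;
 * VM_ACCT converts a byte size into the number of pages to charge against
 * the overcommit accounting (security_vm_enough_memory_mm() below).
 */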

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
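
/*
 * Defaults above: a tmpfs mount with no size= option may use at most half
 * of RAM, and the default inode limit is capped to lowmem (totalram minus
 * totalhigh) so that inode structures cannot exhaust lowmem on highmem
 * configurations.
 */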

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
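
/*
 * shmem_reacct_size() is used on resize of a pre-accounted object: it
 * charges or releases only the difference in page counts, and returns
 * -ENOMEM (from security_vm_enough_memory_mm) if growing the object
 * would exceed the overcommit limits, leaving the old charge intact.
 */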

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}
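
/*
 * Block accounting is two-level: shmem_acct_block() charges the system
 * overcommit accounting (only for VM_NORESERVE objects, which were not
 * pre-accounted at setup), while the percpu counter above enforces the
 * per-superblock limit set by the tmpfs size=/nr_blocks mount options.
 */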

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}
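
/*
 * shmem_charge()/shmem_uncharge() are exported to the rest of mm (notably
 * khugepaged's collapse of shmem pages into a huge page) so that pages
 * added to or removed from the mapping outside of shmem_getpage_gfp()
 * keep the alloced/i_blocks accounting consistent.
 */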

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}
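
/*
 * shmem_replace_entry() is the compare-and-exchange primitive used below
 * to swap a page for its swap entry (and vice versa) in the mapping:
 * -ENOENT tells the caller that a racing thread got there first.
 */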

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->i_pages, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
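
/*
 * Usage sketch (not part of this file): a mount can opt in with
 *	mount -t tmpfs -o huge=always tmpfs /mnt
 * and the system-wide override is set with
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * where the accepted strings are exactly those parsed by
 * shmem_parse_huge() below.
 */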

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}
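
/*
 * Summary of the shrinker above: pass 1 walks the shrinklist under the
 * lock, pinning inodes with igrab() and sorting them into "nothing to
 * gain" (to_remove) and candidates (list); pass 2 drops the doomed
 * references; pass 3 splits the huge page straddling i_size, using
 * trylock_page() so the shrinker can never deadlock against reclaim.
 */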

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	xa_lock_irq(&mapping->i_pages);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->i_pages,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->i_pages, index, page);
	} else {
		error = shmem_replace_entry(mapping, index, expected, page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		xa_unlock_irq(&mapping->i_pages);
	} else {
		page->mapping = NULL;
		xa_unlock_irq(&mapping->i_pages);
		page_ref_sub(page, nr);
	}
	return error;
}
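
/*
 * Note the three insertion cases above: a huge page claims HPAGE_PMD_NR
 * consecutive slots (after checking the whole range is empty); a small
 * page with no "expected" entry is a plain insert; and a page brought
 * back from swap replaces the swap entry it was read for, failing with
 * -ENOENT if a racing thread already replaced it.
 */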

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	xa_lock_irq(&mapping->i_pages);
	old = radix_tree_delete_item(&mapping->i_pages, index, radswap);
	xa_unlock_irq(&mapping->i_pages);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
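
/*
 * radix_tree_delete_item() above only deletes the slot if it still holds
 * radswap, so shmem_free_swap() is an atomic compare-and-delete: -ENOENT
 * means the entry changed under us and the caller must re-examine it.
 */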

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
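
/*
 * Swapped-out pages are represented in the mapping as value entries
 * (xa_is_value), not page pointers, so counting them under RCU as above
 * needs no reference counting; cond_resched_rcu() keeps long walks
 * preemptible.
 */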

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due to 'start' in the
				 * middle of THP: no need to look at these
				 * pages again on the !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
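
/*
 * Note that lend == -1 above means "truncate from lstart to EOF": the
 * second pass then restarts from 'start' until everything is gone, since
 * truncation (unlike hole-punching) must not leave any page behind.
 */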

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);

	if (is_huge_enabled(sb_info))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
1058d041353dSCong Wang 				/*
1059d041353dSCong Wang 				 * _careful to defend against unlocked access to
1060d041353dSCong Wang 				 * ->shrink_list in shmem_unused_huge_shrink()
1061d041353dSCong Wang 				 */
1062d041353dSCong Wang 				if (list_empty_careful(&info->shrinklist)) {
1063779750d2SKirill A. Shutemov 					list_add_tail(&info->shrinklist,
1064779750d2SKirill A. Shutemov 							&sbinfo->shrinklist);
1065779750d2SKirill A. Shutemov 					sbinfo->shrinklist_len++;
1066779750d2SKirill A. Shutemov 				}
1067779750d2SKirill A. Shutemov 				spin_unlock(&sbinfo->shrinklist_lock);
1068779750d2SKirill A. Shutemov 			}
106994c1e62dSHugh Dickins 		}
10701da177e4SLinus Torvalds 	}
10711da177e4SLinus Torvalds 
10726a1a90adSChristoph Hellwig 	setattr_copy(inode, attr);
1073db78b877SChristoph Hellwig 	if (attr->ia_valid & ATTR_MODE)
1074feda821eSChristoph Hellwig 		error = posix_acl_chmod(inode, inode->i_mode);
10751da177e4SLinus Torvalds 	return error;
10761da177e4SLinus Torvalds }
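
/*
 * Hedged userspace sketch (not part of this file) of the seal checks
 * above: once F_SEAL_SHRINK and F_SEAL_GROW are set on a memfd,
 * ftruncate(2) in either direction fails with the -EPERM returned here.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *
 *		ftruncate(fd, 4096);
 *		fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);
 *		if (ftruncate(fd, 8192) < 0)
 *			perror("grow");		// EPERM
 *		if (ftruncate(fd, 0) < 0)
 *			perror("shrink");	// EPERM
 *		return 0;
 *	}
 */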
10771da177e4SLinus Torvalds 
10781f895f75SAl Viro static void shmem_evict_inode(struct inode *inode)
10791da177e4SLinus Torvalds {
10801da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
1081779750d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
10821da177e4SLinus Torvalds 
10833889e6e7Snpiggin@suse.de 	if (inode->i_mapping->a_ops == &shmem_aops) {
10841da177e4SLinus Torvalds 		shmem_unacct_size(info->flags, inode->i_size);
10851da177e4SLinus Torvalds 		inode->i_size = 0;
10863889e6e7Snpiggin@suse.de 		shmem_truncate_range(inode, 0, (loff_t)-1);
1087779750d2SKirill A. Shutemov 		if (!list_empty(&info->shrinklist)) {
1088779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1089779750d2SKirill A. Shutemov 			if (!list_empty(&info->shrinklist)) {
1090779750d2SKirill A. Shutemov 				list_del_init(&info->shrinklist);
1091779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len--;
1092779750d2SKirill A. Shutemov 			}
1093779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1094779750d2SKirill A. Shutemov 		}
10951da177e4SLinus Torvalds 		if (!list_empty(&info->swaplist)) {
1096cb5f7b9aSHugh Dickins 			mutex_lock(&shmem_swaplist_mutex);
10971da177e4SLinus Torvalds 			list_del_init(&info->swaplist);
1098cb5f7b9aSHugh Dickins 			mutex_unlock(&shmem_swaplist_mutex);
10991da177e4SLinus Torvalds 		}
11003ed47db3SAl Viro 	}
1101b09e0fa4SEric Paris 
110238f38657SAristeu Rozanski 	simple_xattrs_free(&info->xattrs);
11030f3c42f5SHugh Dickins 	WARN_ON(inode->i_blocks);
11045b04c689SPavel Emelyanov 	shmem_free_inode(inode->i_sb);
1105dbd5768fSJan Kara 	clear_inode(inode);
11061da177e4SLinus Torvalds }
11071da177e4SLinus Torvalds 
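/*
 * Scan the mapping's radix tree linearly for the slot holding @item
 * (always a swap entry here); return its index, or -1 if absent.
 * Every 4096 slots checked, pause via cond_resched_rcu() so that a
 * long scan cannot stall this CPU inside the RCU read-side section.
 */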
1108478922e2SMatthew Wilcox static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
1109478922e2SMatthew Wilcox {
1110478922e2SMatthew Wilcox 	struct radix_tree_iter iter;
11115b9c98f3SMike Kravetz 	void __rcu **slot;
1112478922e2SMatthew Wilcox 	unsigned long found = -1;
1113478922e2SMatthew Wilcox 	unsigned int checked = 0;
1114478922e2SMatthew Wilcox 
1115478922e2SMatthew Wilcox 	rcu_read_lock();
1116478922e2SMatthew Wilcox 	radix_tree_for_each_slot(slot, root, &iter, 0) {
11175b9c98f3SMike Kravetz 		void *entry = radix_tree_deref_slot(slot);
11185b9c98f3SMike Kravetz 
11195b9c98f3SMike Kravetz 		if (radix_tree_deref_retry(entry)) {
11205b9c98f3SMike Kravetz 			slot = radix_tree_iter_retry(&iter);
11215b9c98f3SMike Kravetz 			continue;
11225b9c98f3SMike Kravetz 		}
11235b9c98f3SMike Kravetz 		if (entry == item) {
1124478922e2SMatthew Wilcox 			found = iter.index;
1125478922e2SMatthew Wilcox 			break;
1126478922e2SMatthew Wilcox 		}
1127478922e2SMatthew Wilcox 		checked++;
1128478922e2SMatthew Wilcox 		if ((checked % 4096) != 0)
1129478922e2SMatthew Wilcox 			continue;
1130478922e2SMatthew Wilcox 		slot = radix_tree_iter_resume(slot, &iter);
1131478922e2SMatthew Wilcox 		cond_resched_rcu();
1132478922e2SMatthew Wilcox 	}
1133478922e2SMatthew Wilcox 
1134478922e2SMatthew Wilcox 	rcu_read_unlock();
1135478922e2SMatthew Wilcox 	return found;
1136478922e2SMatthew Wilcox }
1137478922e2SMatthew Wilcox 
113846f65ec1SHugh Dickins /*
113946f65ec1SHugh Dickins  * If swap found in inode, free it and move page from swapcache to filecache.
114046f65ec1SHugh Dickins  */
114141ffe5d5SHugh Dickins static int shmem_unuse_inode(struct shmem_inode_info *info,
1142bde05d1cSHugh Dickins 			     swp_entry_t swap, struct page **pagep)
11431da177e4SLinus Torvalds {
1144285b2c4fSHugh Dickins 	struct address_space *mapping = info->vfs_inode.i_mapping;
114546f65ec1SHugh Dickins 	void *radswap;
114641ffe5d5SHugh Dickins 	pgoff_t index;
1147bde05d1cSHugh Dickins 	gfp_t gfp;
1148bde05d1cSHugh Dickins 	int error = 0;
11491da177e4SLinus Torvalds 
115046f65ec1SHugh Dickins 	radswap = swp_to_radix_entry(swap);
1151b93b0163SMatthew Wilcox 	index = find_swap_entry(&mapping->i_pages, radswap);
115246f65ec1SHugh Dickins 	if (index == -1)
115300501b53SJohannes Weiner 		return -EAGAIN;	/* tell shmem_unuse we found nothing */
11542e0e26c7SHugh Dickins 
11551b1b32f2SHugh Dickins 	/*
11561b1b32f2SHugh Dickins 	 * Move _head_ to start search for next from here.
11571f895f75SAl Viro 	 * But be careful: shmem_evict_inode checks list_empty without taking
11581b1b32f2SHugh Dickins 	 * mutex, and there's an instant in list_move_tail when info->swaplist
1159285b2c4fSHugh Dickins 	 * would appear empty, if it were the only one on shmem_swaplist.
11601b1b32f2SHugh Dickins 	 */
11611b1b32f2SHugh Dickins 	if (shmem_swaplist.next != &info->swaplist)
11622e0e26c7SHugh Dickins 		list_move_tail(&shmem_swaplist, &info->swaplist);
11632e0e26c7SHugh Dickins 
1164bde05d1cSHugh Dickins 	gfp = mapping_gfp_mask(mapping);
1165bde05d1cSHugh Dickins 	if (shmem_should_replace_page(*pagep, gfp)) {
1166bde05d1cSHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1167bde05d1cSHugh Dickins 		error = shmem_replace_page(pagep, gfp, info, index);
1168bde05d1cSHugh Dickins 		mutex_lock(&shmem_swaplist_mutex);
1169bde05d1cSHugh Dickins 		/*
1170bde05d1cSHugh Dickins 		 * We needed to drop mutex to make that restrictive page
11710142ef6cSHugh Dickins 		 * allocation, but the inode might have been freed while we
11720142ef6cSHugh Dickins 		 * dropped it: although a racing shmem_evict_inode() cannot
11730142ef6cSHugh Dickins 		 * complete without emptying the radix_tree, our page lock
11740142ef6cSHugh Dickins 		 * on this swapcache page is not enough to prevent that -
11750142ef6cSHugh Dickins 		 * free_swap_and_cache() of our swap entry will only
11760142ef6cSHugh Dickins 		 * trylock_page(), removing swap from radix_tree whatever.
11770142ef6cSHugh Dickins 		 *
11780142ef6cSHugh Dickins 		 * We must not proceed to shmem_add_to_page_cache() if the
11790142ef6cSHugh Dickins 		 * inode has been freed, but of course we cannot rely on
11800142ef6cSHugh Dickins 		 * inode or mapping or info to check that.  However, we can
11810142ef6cSHugh Dickins 		 * safely check if our swap entry is still in use (and here
11820142ef6cSHugh Dickins 		 * it can't have got reused for another page): if it's still
11830142ef6cSHugh Dickins 		 * in use, then the inode cannot have been freed yet, and we
11840142ef6cSHugh Dickins 		 * can safely proceed (if it's no longer in use, that tells
11850142ef6cSHugh Dickins 		 * nothing about the inode, but we don't need to unuse swap).
1186bde05d1cSHugh Dickins 		 */
1187bde05d1cSHugh Dickins 		if (!page_swapcount(*pagep))
1188bde05d1cSHugh Dickins 			error = -ENOENT;
1189bde05d1cSHugh Dickins 	}
1190bde05d1cSHugh Dickins 
1191d13d1443SKAMEZAWA Hiroyuki 	/*
1192778dd893SHugh Dickins 	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
1193778dd893SHugh Dickins 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
1194778dd893SHugh Dickins 	 * beneath us (pagelock doesn't help until the page is in pagecache).
1195d13d1443SKAMEZAWA Hiroyuki 	 */
1196bde05d1cSHugh Dickins 	if (!error)
1197bde05d1cSHugh Dickins 		error = shmem_add_to_page_cache(*pagep, mapping, index,
1198fed400a1SWang Sheng-Hui 						radswap);
119948f170fbSHugh Dickins 	if (error != -ENOMEM) {
120046f65ec1SHugh Dickins 		/*
120146f65ec1SHugh Dickins 		 * Truncation and eviction use free_swap_and_cache(), which
120246f65ec1SHugh Dickins 		 * only does trylock page: if we raced, best clean up here.
120346f65ec1SHugh Dickins 		 */
1204bde05d1cSHugh Dickins 		delete_from_swap_cache(*pagep);
1205bde05d1cSHugh Dickins 		set_page_dirty(*pagep);
120646f65ec1SHugh Dickins 		if (!error) {
12074595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1208285b2c4fSHugh Dickins 			info->swapped--;
12094595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
121041ffe5d5SHugh Dickins 			swap_free(swap);
121146f65ec1SHugh Dickins 		}
12121da177e4SLinus Torvalds 	}
12132e0e26c7SHugh Dickins 	return error;
12141da177e4SLinus Torvalds }
12151da177e4SLinus Torvalds 
12161da177e4SLinus Torvalds /*
121746f65ec1SHugh Dickins  * Search through swapped inodes to find and replace swap by page.
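 * Called from try_to_unuse() in mm/swapfile.c during swapoff(2), once
 * for each swapcache page known to belong to tmpfs, trying each
 * swapped inode in turn until one accepts the page.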
12181da177e4SLinus Torvalds  */
121941ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
12201da177e4SLinus Torvalds {
122141ffe5d5SHugh Dickins 	struct list_head *this, *next;
12221da177e4SLinus Torvalds 	struct shmem_inode_info *info;
122300501b53SJohannes Weiner 	struct mem_cgroup *memcg;
1224bde05d1cSHugh Dickins 	int error = 0;
1225bde05d1cSHugh Dickins 
1226bde05d1cSHugh Dickins 	/*
1227bde05d1cSHugh Dickins 	 * There's a faint possibility that swap page was replaced before
12280142ef6cSHugh Dickins 	 * caller locked it: caller will come back later with the right page.
1229bde05d1cSHugh Dickins 	 */
12300142ef6cSHugh Dickins 	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
1231bde05d1cSHugh Dickins 		goto out;
1232778dd893SHugh Dickins 
1233778dd893SHugh Dickins 	/*
1234778dd893SHugh Dickins 	 * Charge page using GFP_KERNEL while we can wait, before taking
1235778dd893SHugh Dickins 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
1236778dd893SHugh Dickins 	 * Charged back to the user (not to caller) when swap account is used.
1237778dd893SHugh Dickins 	 */
12382cf85583STejun Heo 	error = mem_cgroup_try_charge_delay(page, current->mm, GFP_KERNEL,
12392cf85583STejun Heo 					    &memcg, false);
1240778dd893SHugh Dickins 	if (error)
1241778dd893SHugh Dickins 		goto out;
124246f65ec1SHugh Dickins 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
124300501b53SJohannes Weiner 	error = -EAGAIN;
12441da177e4SLinus Torvalds 
1245cb5f7b9aSHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
124641ffe5d5SHugh Dickins 	list_for_each_safe(this, next, &shmem_swaplist) {
124741ffe5d5SHugh Dickins 		info = list_entry(this, struct shmem_inode_info, swaplist);
1248285b2c4fSHugh Dickins 		if (info->swapped)
124900501b53SJohannes Weiner 			error = shmem_unuse_inode(info, swap, &page);
12506922c0c7SHugh Dickins 		else
12516922c0c7SHugh Dickins 			list_del_init(&info->swaplist);
1252cb5f7b9aSHugh Dickins 		cond_resched();
125300501b53SJohannes Weiner 		if (error != -EAGAIN)
1254778dd893SHugh Dickins 			break;
125500501b53SJohannes Weiner 		/* found nothing in this: move on to search the next */
12561da177e4SLinus Torvalds 	}
1257cb5f7b9aSHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
1258778dd893SHugh Dickins 
125900501b53SJohannes Weiner 	if (error) {
126000501b53SJohannes Weiner 		if (error != -ENOMEM)
126100501b53SJohannes Weiner 			error = 0;
1262f627c2f5SKirill A. Shutemov 		mem_cgroup_cancel_charge(page, memcg, false);
126300501b53SJohannes Weiner 	} else
1264f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, true, false);
1265778dd893SHugh Dickins out:
1266aaa46865SHugh Dickins 	unlock_page(page);
126709cbfeafSKirill A. Shutemov 	put_page(page);
1268778dd893SHugh Dickins 	return error;
12691da177e4SLinus Torvalds }
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds /*
12721da177e4SLinus Torvalds  * Move the page from the page cache to the swap cache.
12731da177e4SLinus Torvalds  */
12741da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc)
12751da177e4SLinus Torvalds {
12761da177e4SLinus Torvalds 	struct shmem_inode_info *info;
12771da177e4SLinus Torvalds 	struct address_space *mapping;
12781da177e4SLinus Torvalds 	struct inode *inode;
12796922c0c7SHugh Dickins 	swp_entry_t swap;
12806922c0c7SHugh Dickins 	pgoff_t index;
12811da177e4SLinus Torvalds 
1282800d8c63SKirill A. Shutemov 	VM_BUG_ON_PAGE(PageCompound(page), page);
12831da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
12841da177e4SLinus Torvalds 	mapping = page->mapping;
12851da177e4SLinus Torvalds 	index = page->index;
12861da177e4SLinus Torvalds 	inode = mapping->host;
12871da177e4SLinus Torvalds 	info = SHMEM_I(inode);
12881da177e4SLinus Torvalds 	if (info->flags & VM_LOCKED)
12891da177e4SLinus Torvalds 		goto redirty;
1290d9fe526aSHugh Dickins 	if (!total_swap_pages)
12911da177e4SLinus Torvalds 		goto redirty;
12921da177e4SLinus Torvalds 
1293d9fe526aSHugh Dickins 	/*
129497b713baSChristoph Hellwig 	 * Our capabilities prevent regular writeback or sync from ever calling
129597b713baSChristoph Hellwig 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
129697b713baSChristoph Hellwig 	 * its underlying filesystem, in which case tmpfs should write out to
129797b713baSChristoph Hellwig 	 * swap only in response to memory pressure, and not for the writeback
129897b713baSChristoph Hellwig 	 * threads or sync.
1299d9fe526aSHugh Dickins 	 */
130048f170fbSHugh Dickins 	if (!wbc->for_reclaim) {
130148f170fbSHugh Dickins 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
130248f170fbSHugh Dickins 		goto redirty;
130348f170fbSHugh Dickins 	}
13041635f6a7SHugh Dickins 
13051635f6a7SHugh Dickins 	/*
13061635f6a7SHugh Dickins 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
13071635f6a7SHugh Dickins 	 * value into swapfile.c, the only way we can correctly account for a
13081635f6a7SHugh Dickins 	 * fallocated page arriving here is now to initialize it and write it.
13091aac1400SHugh Dickins 	 *
13101aac1400SHugh Dickins 	 * That's okay for a page already fallocated earlier, but if we have
13111aac1400SHugh Dickins 	 * not yet completed the fallocation, then (a) we want to keep track
13121aac1400SHugh Dickins 	 * of this page in case we have to undo it, and (b) it may not be a
13131aac1400SHugh Dickins 	 * good idea to continue anyway, once we're pushing into swap.  So
13141aac1400SHugh Dickins 	 * reactivate the page, and let shmem_fallocate() quit when too many.
13151635f6a7SHugh Dickins 	 */
13161635f6a7SHugh Dickins 	if (!PageUptodate(page)) {
13171aac1400SHugh Dickins 		if (inode->i_private) {
13181aac1400SHugh Dickins 			struct shmem_falloc *shmem_falloc;
13191aac1400SHugh Dickins 			spin_lock(&inode->i_lock);
13201aac1400SHugh Dickins 			shmem_falloc = inode->i_private;
13211aac1400SHugh Dickins 			if (shmem_falloc &&
13228e205f77SHugh Dickins 			    !shmem_falloc->waitq &&
13231aac1400SHugh Dickins 			    index >= shmem_falloc->start &&
13241aac1400SHugh Dickins 			    index < shmem_falloc->next)
13251aac1400SHugh Dickins 				shmem_falloc->nr_unswapped++;
13261aac1400SHugh Dickins 			else
13271aac1400SHugh Dickins 				shmem_falloc = NULL;
13281aac1400SHugh Dickins 			spin_unlock(&inode->i_lock);
13291aac1400SHugh Dickins 			if (shmem_falloc)
13301aac1400SHugh Dickins 				goto redirty;
13311aac1400SHugh Dickins 		}
13321635f6a7SHugh Dickins 		clear_highpage(page);
13331635f6a7SHugh Dickins 		flush_dcache_page(page);
13341635f6a7SHugh Dickins 		SetPageUptodate(page);
13351635f6a7SHugh Dickins 	}
13361635f6a7SHugh Dickins 
133738d8b4e6SHuang Ying 	swap = get_swap_page(page);
133848f170fbSHugh Dickins 	if (!swap.val)
133948f170fbSHugh Dickins 		goto redirty;
1340d9fe526aSHugh Dickins 
1341b1dea800SHugh Dickins 	/*
1342b1dea800SHugh Dickins 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
13436922c0c7SHugh Dickins 	 * if it's not already there.  Do it now before the page is
13446922c0c7SHugh Dickins 	 * moved to swap cache, when its pagelock no longer protects
1345b1dea800SHugh Dickins 	 * the inode from eviction.  But don't unlock the mutex until
13466922c0c7SHugh Dickins 	 * we've incremented swapped, because shmem_unuse_inode() will
13476922c0c7SHugh Dickins 	 * prune a !swapped inode from the swaplist under this mutex.
1348b1dea800SHugh Dickins 	 */
1349b1dea800SHugh Dickins 	mutex_lock(&shmem_swaplist_mutex);
135005bf86b4SHugh Dickins 	if (list_empty(&info->swaplist))
135105bf86b4SHugh Dickins 		list_add_tail(&info->swaplist, &shmem_swaplist);
1352b1dea800SHugh Dickins 
135348f170fbSHugh Dickins 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
13544595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1355267a4c76SHugh Dickins 		shmem_recalc_inode(inode);
1356267a4c76SHugh Dickins 		info->swapped++;
13574595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
1358267a4c76SHugh Dickins 
1359aaa46865SHugh Dickins 		swap_shmem_alloc(swap);
13606922c0c7SHugh Dickins 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
13616922c0c7SHugh Dickins 
13626922c0c7SHugh Dickins 		mutex_unlock(&shmem_swaplist_mutex);
1363d9fe526aSHugh Dickins 		BUG_ON(page_mapped(page));
13649fab5619SHugh Dickins 		swap_writepage(page, wbc);
13651da177e4SLinus Torvalds 		return 0;
13661da177e4SLinus Torvalds 	}
13671da177e4SLinus Torvalds 
13686922c0c7SHugh Dickins 	mutex_unlock(&shmem_swaplist_mutex);
136975f6d6d2SMinchan Kim 	put_swap_page(page, swap);
13701da177e4SLinus Torvalds redirty:
13711da177e4SLinus Torvalds 	set_page_dirty(page);
1372d9fe526aSHugh Dickins 	if (wbc->for_reclaim)
1373d9fe526aSHugh Dickins 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1374d9fe526aSHugh Dickins 	unlock_page(page);
1375d9fe526aSHugh Dickins 	return 0;
13761da177e4SLinus Torvalds }
13771da177e4SLinus Torvalds 
137875edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
137971fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1380680d794bSakpm@linux-foundation.org {
1381680d794bSakpm@linux-foundation.org 	char buffer[64];
1382680d794bSakpm@linux-foundation.org 
138371fe804bSLee Schermerhorn 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1384095f1fc4SLee Schermerhorn 		return;		/* show nothing */
1385095f1fc4SLee Schermerhorn 
1386a7a88b23SHugh Dickins 	mpol_to_str(buffer, sizeof(buffer), mpol);
1387095f1fc4SLee Schermerhorn 
1388095f1fc4SLee Schermerhorn 	seq_printf(seq, ",mpol=%s", buffer);
1389680d794bSakpm@linux-foundation.org }
139071fe804bSLee Schermerhorn 
139171fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
139271fe804bSLee Schermerhorn {
139371fe804bSLee Schermerhorn 	struct mempolicy *mpol = NULL;
139471fe804bSLee Schermerhorn 	if (sbinfo->mpol) {
139571fe804bSLee Schermerhorn 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
139671fe804bSLee Schermerhorn 		mpol = sbinfo->mpol;
139771fe804bSLee Schermerhorn 		mpol_get(mpol);
139871fe804bSLee Schermerhorn 		spin_unlock(&sbinfo->stat_lock);
139971fe804bSLee Schermerhorn 	}
140071fe804bSLee Schermerhorn 	return mpol;
140171fe804bSLee Schermerhorn }
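
/*
 * Usage note: these two helpers back the tmpfs "mpol=" mount option,
 * e.g. "mount -t tmpfs -o mpol=interleave none /mnt".  shmem_show_mpol()
 * reproduces that string in /proc/mounts, and shmem_get_sbmpol() hands
 * each new inode a reference on the superblock's parsed policy.
 */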
140275edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
140375edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
140475edd345SHugh Dickins {
140575edd345SHugh Dickins }
140675edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
140775edd345SHugh Dickins {
140875edd345SHugh Dickins 	return NULL;
140975edd345SHugh Dickins }
141075edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */
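
/*
 * Without CONFIG_NUMA, vm_area_struct has no vm_policy member: alias it
 * to the (here unused) vm_private_data pointer so that the pseudo-vma
 * helpers below compile either way.
 */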
141175edd345SHugh Dickins #ifndef CONFIG_NUMA
141275edd345SHugh Dickins #define vm_policy vm_private_data
141375edd345SHugh Dickins #endif
1414680d794bSakpm@linux-foundation.org 
1415800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1416800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1417800d8c63SKirill A. Shutemov {
1418800d8c63SKirill A. Shutemov 	/* Create a pseudo vma that just contains the policy */
14192c4541e2SKirill A. Shutemov 	vma_init(vma, NULL);
1420800d8c63SKirill A. Shutemov 	/* Bias interleave by inode number to distribute better across nodes */
1421800d8c63SKirill A. Shutemov 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1422800d8c63SKirill A. Shutemov 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1423800d8c63SKirill A. Shutemov }
1424800d8c63SKirill A. Shutemov 
1425800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1426800d8c63SKirill A. Shutemov {
1427800d8c63SKirill A. Shutemov 	/* Drop reference taken by mpol_shared_policy_lookup() */
1428800d8c63SKirill A. Shutemov 	mpol_cond_put(vma->vm_policy);
1429800d8c63SKirill A. Shutemov }
1430800d8c63SKirill A. Shutemov 
143141ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
143241ffe5d5SHugh Dickins 			struct shmem_inode_info *info, pgoff_t index)
14331da177e4SLinus Torvalds {
14341da177e4SLinus Torvalds 	struct vm_area_struct pvma;
143518a2f371SMel Gorman 	struct page *page;
1436e9e9b7ecSMinchan Kim 	struct vm_fault vmf;
14371da177e4SLinus Torvalds 
1438800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1439e9e9b7ecSMinchan Kim 	vmf.vma = &pvma;
1440e9e9b7ecSMinchan Kim 	vmf.address = 0;
1441e9e9b7ecSMinchan Kim 	page = swap_cluster_readahead(swap, gfp, &vmf);
1442800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
144318a2f371SMel Gorman 
1444800d8c63SKirill A. Shutemov 	return page;
1445800d8c63SKirill A. Shutemov }
144618a2f371SMel Gorman 
1447800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp,
1448800d8c63SKirill A. Shutemov 		struct shmem_inode_info *info, pgoff_t index)
1449800d8c63SKirill A. Shutemov {
1450800d8c63SKirill A. Shutemov 	struct vm_area_struct pvma;
1451800d8c63SKirill A. Shutemov 	struct inode *inode = &info->vfs_inode;
1452800d8c63SKirill A. Shutemov 	struct address_space *mapping = inode->i_mapping;
14534620a06eSGeert Uytterhoeven 	pgoff_t idx, hindex;
1454800d8c63SKirill A. Shutemov 	void __rcu **results;
1455800d8c63SKirill A. Shutemov 	struct page *page;
1456800d8c63SKirill A. Shutemov 
1457e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1458800d8c63SKirill A. Shutemov 		return NULL;
1459800d8c63SKirill A. Shutemov 
14604620a06eSGeert Uytterhoeven 	hindex = round_down(index, HPAGE_PMD_NR);
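	/*
	 * A compound page cannot be inserted over existing entries: bail
	 * out if any slot in [hindex, hindex + HPAGE_PMD_NR) is already
	 * occupied.
	 */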
1461800d8c63SKirill A. Shutemov 	rcu_read_lock();
1462b93b0163SMatthew Wilcox 	if (radix_tree_gang_lookup_slot(&mapping->i_pages, &results, &idx,
1463800d8c63SKirill A. Shutemov 				hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1464800d8c63SKirill A. Shutemov 		rcu_read_unlock();
1465800d8c63SKirill A. Shutemov 		return NULL;
1466800d8c63SKirill A. Shutemov 	}
1467800d8c63SKirill A. Shutemov 	rcu_read_unlock();
1468800d8c63SKirill A. Shutemov 
1469800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, hindex);
1470800d8c63SKirill A. Shutemov 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1471800d8c63SKirill A. Shutemov 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1472800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
1473800d8c63SKirill A. Shutemov 	if (page)
1474800d8c63SKirill A. Shutemov 		prep_transhuge_page(page);
147518a2f371SMel Gorman 	return page;
147618a2f371SMel Gorman }
147718a2f371SMel Gorman 
147818a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp,
147918a2f371SMel Gorman 			struct shmem_inode_info *info, pgoff_t index)
148018a2f371SMel Gorman {
148118a2f371SMel Gorman 	struct vm_area_struct pvma;
148218a2f371SMel Gorman 	struct page *page;
148318a2f371SMel Gorman 
1484800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_init(&pvma, info, index);
1485800d8c63SKirill A. Shutemov 	page = alloc_page_vma(gfp, &pvma, 0);
1486800d8c63SKirill A. Shutemov 	shmem_pseudo_vma_destroy(&pvma);
148718a2f371SMel Gorman 
1488800d8c63SKirill A. Shutemov 	return page;
1489800d8c63SKirill A. Shutemov }
1490800d8c63SKirill A. Shutemov 
1491800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
14920f079694SMike Rapoport 		struct inode *inode,
1493800d8c63SKirill A. Shutemov 		pgoff_t index, bool huge)
1494800d8c63SKirill A. Shutemov {
14950f079694SMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
1496800d8c63SKirill A. Shutemov 	struct page *page;
1497800d8c63SKirill A. Shutemov 	int nr;
1498800d8c63SKirill A. Shutemov 	int err = -ENOSPC;
1499800d8c63SKirill A. Shutemov 
1500e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1501800d8c63SKirill A. Shutemov 		huge = false;
1502800d8c63SKirill A. Shutemov 	nr = huge ? HPAGE_PMD_NR : 1;
1503800d8c63SKirill A. Shutemov 
15040f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, nr))
1505800d8c63SKirill A. Shutemov 		goto failed;
1506800d8c63SKirill A. Shutemov 
1507800d8c63SKirill A. Shutemov 	if (huge)
1508800d8c63SKirill A. Shutemov 		page = shmem_alloc_hugepage(gfp, info, index);
1509800d8c63SKirill A. Shutemov 	else
1510800d8c63SKirill A. Shutemov 		page = shmem_alloc_page(gfp, info, index);
151175edd345SHugh Dickins 	if (page) {
151275edd345SHugh Dickins 		__SetPageLocked(page);
151375edd345SHugh Dickins 		__SetPageSwapBacked(page);
1514800d8c63SKirill A. Shutemov 		return page;
151575edd345SHugh Dickins 	}
151618a2f371SMel Gorman 
1517800d8c63SKirill A. Shutemov 	err = -ENOMEM;
15180f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, nr);
1519800d8c63SKirill A. Shutemov failed:
1520800d8c63SKirill A. Shutemov 	return ERR_PTR(err);
15211da177e4SLinus Torvalds }
152271fe804bSLee Schermerhorn 
15231da177e4SLinus Torvalds /*
1524bde05d1cSHugh Dickins  * When a page is moved from swapcache to shmem filecache (either by the
1525bde05d1cSHugh Dickins  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1526bde05d1cSHugh Dickins  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1527bde05d1cSHugh Dickins  * ignorance of the mapping it belongs to.  If that mapping has special
1528bde05d1cSHugh Dickins  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1529bde05d1cSHugh Dickins  * we may need to copy to a suitable page before moving to filecache.
1530bde05d1cSHugh Dickins  *
1531bde05d1cSHugh Dickins  * In a future release, this may well be extended to respect cpuset and
1532bde05d1cSHugh Dickins  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1533bde05d1cSHugh Dickins  * but for now it is a simple matter of zone.
1534bde05d1cSHugh Dickins  */
1535bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1536bde05d1cSHugh Dickins {
1537bde05d1cSHugh Dickins 	return page_zonenum(page) > gfp_zone(gfp);
1538bde05d1cSHugh Dickins }
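
/*
 * For example (per the comment above): the gma500 GEM driver maps with a
 * gfp mask constrained to RAM below 4GB; if swapin placed this page in a
 * higher zone, page_zonenum(page) > gfp_zone(gfp), and the page must be
 * copied into a suitable one by shmem_replace_page() below.
 */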
1539bde05d1cSHugh Dickins 
1540bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1541bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
1542bde05d1cSHugh Dickins {
1543bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
1544bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
1545bde05d1cSHugh Dickins 	pgoff_t swap_index;
1546bde05d1cSHugh Dickins 	int error;
1547bde05d1cSHugh Dickins 
1548bde05d1cSHugh Dickins 	oldpage = *pagep;
1549bde05d1cSHugh Dickins 	swap_index = page_private(oldpage);
1550bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1551bde05d1cSHugh Dickins 
1552bde05d1cSHugh Dickins 	/*
1553bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1554bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1555bde05d1cSHugh Dickins 	 */
1556bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1557bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1558bde05d1cSHugh Dickins 	if (!newpage)
1559bde05d1cSHugh Dickins 		return -ENOMEM;
1560bde05d1cSHugh Dickins 
156109cbfeafSKirill A. Shutemov 	get_page(newpage);
1562bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
15630142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1564bde05d1cSHugh Dickins 
15659956edf3SHugh Dickins 	__SetPageLocked(newpage);
15669956edf3SHugh Dickins 	__SetPageSwapBacked(newpage);
1567bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1568bde05d1cSHugh Dickins 	set_page_private(newpage, swap_index);
1569bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1570bde05d1cSHugh Dickins 
1571bde05d1cSHugh Dickins 	/*
1572bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1573bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1574bde05d1cSHugh Dickins 	 */
1575b93b0163SMatthew Wilcox 	xa_lock_irq(&swap_mapping->i_pages);
1576*62f945b6SMatthew Wilcox 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
15770142ef6cSHugh Dickins 	if (!error) {
157811fb9989SMel Gorman 		__inc_node_page_state(newpage, NR_FILE_PAGES);
157911fb9989SMel Gorman 		__dec_node_page_state(oldpage, NR_FILE_PAGES);
15800142ef6cSHugh Dickins 	}
1581b93b0163SMatthew Wilcox 	xa_unlock_irq(&swap_mapping->i_pages);
1582bde05d1cSHugh Dickins 
15830142ef6cSHugh Dickins 	if (unlikely(error)) {
15840142ef6cSHugh Dickins 		/*
15850142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
15860142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
15870142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
15880142ef6cSHugh Dickins 		 */
15890142ef6cSHugh Dickins 		oldpage = newpage;
15900142ef6cSHugh Dickins 	} else {
15916a93ca8fSJohannes Weiner 		mem_cgroup_migrate(oldpage, newpage);
1592bde05d1cSHugh Dickins 		lru_cache_add_anon(newpage);
15930142ef6cSHugh Dickins 		*pagep = newpage;
15940142ef6cSHugh Dickins 	}
1595bde05d1cSHugh Dickins 
1596bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1597bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1598bde05d1cSHugh Dickins 
1599bde05d1cSHugh Dickins 	unlock_page(oldpage);
160009cbfeafSKirill A. Shutemov 	put_page(oldpage);
160109cbfeafSKirill A. Shutemov 	put_page(oldpage);
16020142ef6cSHugh Dickins 	return error;
1603bde05d1cSHugh Dickins }
1604bde05d1cSHugh Dickins 
1605bde05d1cSHugh Dickins /*
160668da9f05SHugh Dickins  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
16071da177e4SLinus Torvalds  *
16081da177e4SLinus Torvalds  * If we allocate a new one we do not mark it dirty. That's up to the
16091da177e4SLinus Torvalds  * vm. If we swap it in we mark it dirty, and also free the swap
16109e18eb29SAndres Lagar-Cavilla  * entry, since a page cannot live in both the swap and page cache.
16119e18eb29SAndres Lagar-Cavilla  *
16129e18eb29SAndres Lagar-Cavilla  * fault_mm and fault_type are only supplied by shmem_fault:
16139e18eb29SAndres Lagar-Cavilla  * otherwise they are NULL.
16141da177e4SLinus Torvalds  */
161541ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
16169e18eb29SAndres Lagar-Cavilla 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
16172b740303SSouptick Joarder 	struct vm_area_struct *vma, struct vm_fault *vmf,
16182b740303SSouptick Joarder 			vm_fault_t *fault_type)
16191da177e4SLinus Torvalds {
16201da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
162123f919d4SArnd Bergmann 	struct shmem_inode_info *info = SHMEM_I(inode);
16221da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo;
16239e18eb29SAndres Lagar-Cavilla 	struct mm_struct *charge_mm;
162400501b53SJohannes Weiner 	struct mem_cgroup *memcg;
162527ab7006SHugh Dickins 	struct page *page;
16261da177e4SLinus Torvalds 	swp_entry_t swap;
1627657e3038SKirill A. Shutemov 	enum sgp_type sgp_huge = sgp;
1628800d8c63SKirill A. Shutemov 	pgoff_t hindex = index;
16291da177e4SLinus Torvalds 	int error;
163054af6042SHugh Dickins 	int once = 0;
16311635f6a7SHugh Dickins 	int alloced = 0;
16321da177e4SLinus Torvalds 
163309cbfeafSKirill A. Shutemov 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
16341da177e4SLinus Torvalds 		return -EFBIG;
1635657e3038SKirill A. Shutemov 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1636657e3038SKirill A. Shutemov 		sgp = SGP_CACHE;
16371da177e4SLinus Torvalds repeat:
163854af6042SHugh Dickins 	swap.val = 0;
16390cd6144aSJohannes Weiner 	page = find_lock_entry(mapping, index);
16403159f943SMatthew Wilcox 	if (xa_is_value(page)) {
164154af6042SHugh Dickins 		swap = radix_to_swp_entry(page);
164254af6042SHugh Dickins 		page = NULL;
164354af6042SHugh Dickins 	}
164454af6042SHugh Dickins 
164575edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
164609cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
164754af6042SHugh Dickins 		error = -EINVAL;
1648267a4c76SHugh Dickins 		goto unlock;
164954af6042SHugh Dickins 	}
165054af6042SHugh Dickins 
165166d2f4d2SHugh Dickins 	if (page && sgp == SGP_WRITE)
165266d2f4d2SHugh Dickins 		mark_page_accessed(page);
165366d2f4d2SHugh Dickins 
16541635f6a7SHugh Dickins 	/* fallocated page? */
16551635f6a7SHugh Dickins 	if (page && !PageUptodate(page)) {
16561635f6a7SHugh Dickins 		if (sgp != SGP_READ)
16571635f6a7SHugh Dickins 			goto clear;
16581635f6a7SHugh Dickins 		unlock_page(page);
165909cbfeafSKirill A. Shutemov 		put_page(page);
16601635f6a7SHugh Dickins 		page = NULL;
16611635f6a7SHugh Dickins 	}
166254af6042SHugh Dickins 	if (page || (sgp == SGP_READ && !swap.val)) {
166354af6042SHugh Dickins 		*pagep = page;
166454af6042SHugh Dickins 		return 0;
166527ab7006SHugh Dickins 	}
166627ab7006SHugh Dickins 
1667b409f9fcSHugh Dickins 	/*
166854af6042SHugh Dickins 	 * Fast cache lookup did not find it:
166954af6042SHugh Dickins 	 * bring it back from swap or allocate.
1670b409f9fcSHugh Dickins 	 */
167154af6042SHugh Dickins 	sbinfo = SHMEM_SB(inode->i_sb);
1672cfda0526SMike Rapoport 	charge_mm = vma ? vma->vm_mm : current->mm;
167327ab7006SHugh Dickins 
16741da177e4SLinus Torvalds 	if (swap.val) {
16751da177e4SLinus Torvalds 		/* Look it up and read it in.. */
1676ec560175SHuang Ying 		page = lookup_swap_cache(swap, NULL, 0);
167727ab7006SHugh Dickins 		if (!page) {
16789e18eb29SAndres Lagar-Cavilla 			/* Or update major stats only when swapin succeeds?? */
16799e18eb29SAndres Lagar-Cavilla 			if (fault_type) {
168068da9f05SHugh Dickins 				*fault_type |= VM_FAULT_MAJOR;
16819e18eb29SAndres Lagar-Cavilla 				count_vm_event(PGMAJFAULT);
16822262185cSRoman Gushchin 				count_memcg_event_mm(charge_mm, PGMAJFAULT);
16839e18eb29SAndres Lagar-Cavilla 			}
16849e18eb29SAndres Lagar-Cavilla 			/* Here we actually start the io */
168541ffe5d5SHugh Dickins 			page = shmem_swapin(swap, gfp, info, index);
168627ab7006SHugh Dickins 			if (!page) {
16871da177e4SLinus Torvalds 				error = -ENOMEM;
168854af6042SHugh Dickins 				goto failed;
1689285b2c4fSHugh Dickins 			}
16901da177e4SLinus Torvalds 		}
16911da177e4SLinus Torvalds 
16921da177e4SLinus Torvalds 		/* We have to do this with page locked to prevent races */
169354af6042SHugh Dickins 		lock_page(page);
16940142ef6cSHugh Dickins 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
1695d1899228SHugh Dickins 		    !shmem_confirm_swap(mapping, index, swap)) {
1696bde05d1cSHugh Dickins 			error = -EEXIST;	/* try again */
1697d1899228SHugh Dickins 			goto unlock;
1698bde05d1cSHugh Dickins 		}
169927ab7006SHugh Dickins 		if (!PageUptodate(page)) {
17001da177e4SLinus Torvalds 			error = -EIO;
170154af6042SHugh Dickins 			goto failed;
170254af6042SHugh Dickins 		}
170354af6042SHugh Dickins 		wait_on_page_writeback(page);
170454af6042SHugh Dickins 
1705bde05d1cSHugh Dickins 		if (shmem_should_replace_page(page, gfp)) {
1706bde05d1cSHugh Dickins 			error = shmem_replace_page(&page, gfp, info, index);
1707bde05d1cSHugh Dickins 			if (error)
170854af6042SHugh Dickins 				goto failed;
17091da177e4SLinus Torvalds 		}
17101da177e4SLinus Torvalds 
17112cf85583STejun Heo 		error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1712f627c2f5SKirill A. Shutemov 				false);
1713d1899228SHugh Dickins 		if (!error) {
171454af6042SHugh Dickins 			error = shmem_add_to_page_cache(page, mapping, index,
1715fed400a1SWang Sheng-Hui 						swp_to_radix_entry(swap));
1716215c02bcSHugh Dickins 			/*
1717215c02bcSHugh Dickins 			 * We already confirmed swap under page lock, and make
1718215c02bcSHugh Dickins 			 * no memory allocation here, so usually no possibility
1719215c02bcSHugh Dickins 			 * of error; but free_swap_and_cache() only trylocks a
1720215c02bcSHugh Dickins 			 * page, so it is just possible that the entry has been
1721215c02bcSHugh Dickins 			 * truncated or holepunched since swap was confirmed.
1722215c02bcSHugh Dickins 			 * shmem_undo_range() will have done some of the
1723215c02bcSHugh Dickins 			 * unaccounting, now delete_from_swap_cache() will do
172493aa7d95SVladimir Davydov 			 * the rest.
1725215c02bcSHugh Dickins 			 * Reset swap.val? No, leave it so "failed" goes back to
1726215c02bcSHugh Dickins 			 * "repeat": reading a hole and writing should succeed.
1727215c02bcSHugh Dickins 			 */
172800501b53SJohannes Weiner 			if (error) {
1729f627c2f5SKirill A. Shutemov 				mem_cgroup_cancel_charge(page, memcg, false);
1730215c02bcSHugh Dickins 				delete_from_swap_cache(page);
1731d1899228SHugh Dickins 			}
173200501b53SJohannes Weiner 		}
173354af6042SHugh Dickins 		if (error)
173454af6042SHugh Dickins 			goto failed;
173554af6042SHugh Dickins 
1736f627c2f5SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, true, false);
173700501b53SJohannes Weiner 
17384595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
173954af6042SHugh Dickins 		info->swapped--;
174054af6042SHugh Dickins 		shmem_recalc_inode(inode);
17414595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
174227ab7006SHugh Dickins 
174366d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
174466d2f4d2SHugh Dickins 			mark_page_accessed(page);
174566d2f4d2SHugh Dickins 
174627ab7006SHugh Dickins 		delete_from_swap_cache(page);
174727ab7006SHugh Dickins 		set_page_dirty(page);
174827ab7006SHugh Dickins 		swap_free(swap);
174927ab7006SHugh Dickins 
175054af6042SHugh Dickins 	} else {
1751cfda0526SMike Rapoport 		if (vma && userfaultfd_missing(vma)) {
1752cfda0526SMike Rapoport 			*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1753cfda0526SMike Rapoport 			return 0;
1754cfda0526SMike Rapoport 		}
1755cfda0526SMike Rapoport 
1756800d8c63SKirill A. Shutemov 		/* shmem_symlink() */
1757800d8c63SKirill A. Shutemov 		if (mapping->a_ops != &shmem_aops)
1758800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1759657e3038SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1760800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1761800d8c63SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_FORCE)
1762800d8c63SKirill A. Shutemov 			goto alloc_huge;
1763800d8c63SKirill A. Shutemov 		switch (sbinfo->huge) {
1764800d8c63SKirill A. Shutemov 			loff_t i_size;
1765800d8c63SKirill A. Shutemov 			pgoff_t off;
1766800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
1767800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1768800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
1769800d8c63SKirill A. Shutemov 			off = round_up(index, HPAGE_PMD_NR);
1770800d8c63SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
1771800d8c63SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
1772800d8c63SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
1773800d8c63SKirill A. Shutemov 				goto alloc_huge;
1774800d8c63SKirill A. Shutemov 			/* fallthrough */
1775800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
1776657e3038SKirill A. Shutemov 			if (sgp_huge == SGP_HUGE)
1777657e3038SKirill A. Shutemov 				goto alloc_huge;
1778657e3038SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
1779800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
178059a16eadSHugh Dickins 		}
17811da177e4SLinus Torvalds 
1782800d8c63SKirill A. Shutemov alloc_huge:
17830f079694SMike Rapoport 		page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1784800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
17850f079694SMike Rapoport alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, inode,
1786800d8c63SKirill A. Shutemov 					index, false);
178754af6042SHugh Dickins 		}
1788800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
1789779750d2SKirill A. Shutemov 			int retry = 5;
1790800d8c63SKirill A. Shutemov 			error = PTR_ERR(page);
1791800d8c63SKirill A. Shutemov 			page = NULL;
1792779750d2SKirill A. Shutemov 			if (error != -ENOSPC)
1793779750d2SKirill A. Shutemov 				goto failed;
1794779750d2SKirill A. Shutemov 			/*
1795779750d2SKirill A. Shutemov 			 * Try to reclaim some space by splitting a huge page
1796779750d2SKirill A. Shutemov 			 * beyond i_size on the filesystem.
1797779750d2SKirill A. Shutemov 			 */
1798779750d2SKirill A. Shutemov 			while (retry--) {
1799779750d2SKirill A. Shutemov 				int ret;
1800779750d2SKirill A. Shutemov 				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1801779750d2SKirill A. Shutemov 				if (ret == SHRINK_STOP)
1802779750d2SKirill A. Shutemov 					break;
1803779750d2SKirill A. Shutemov 				if (ret)
1804779750d2SKirill A. Shutemov 					goto alloc_nohuge;
1805779750d2SKirill A. Shutemov 			}
1806800d8c63SKirill A. Shutemov 			goto failed;
1807800d8c63SKirill A. Shutemov 		}
1808800d8c63SKirill A. Shutemov 
1809800d8c63SKirill A. Shutemov 		if (PageTransHuge(page))
1810800d8c63SKirill A. Shutemov 			hindex = round_down(index, HPAGE_PMD_NR);
1811800d8c63SKirill A. Shutemov 		else
1812800d8c63SKirill A. Shutemov 			hindex = index;
1813800d8c63SKirill A. Shutemov 
181466d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
1815eb39d618SHugh Dickins 			__SetPageReferenced(page);
181666d2f4d2SHugh Dickins 
18172cf85583STejun Heo 		error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
1818800d8c63SKirill A. Shutemov 				PageTransHuge(page));
181954af6042SHugh Dickins 		if (error)
1820800d8c63SKirill A. Shutemov 			goto unacct;
1821800d8c63SKirill A. Shutemov 		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1822800d8c63SKirill A. Shutemov 				compound_order(page));
1823b065b432SHugh Dickins 		if (!error) {
1824800d8c63SKirill A. Shutemov 			error = shmem_add_to_page_cache(page, mapping, hindex,
1825fed400a1SWang Sheng-Hui 							NULL);
1826b065b432SHugh Dickins 			radix_tree_preload_end();
1827b065b432SHugh Dickins 		}
1828b065b432SHugh Dickins 		if (error) {
1829800d8c63SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg,
1830800d8c63SKirill A. Shutemov 					PageTransHuge(page));
1831800d8c63SKirill A. Shutemov 			goto unacct;
1832b065b432SHugh Dickins 		}
1833800d8c63SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false,
1834800d8c63SKirill A. Shutemov 				PageTransHuge(page));
183554af6042SHugh Dickins 		lru_cache_add_anon(page);
183654af6042SHugh Dickins 
18374595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1838800d8c63SKirill A. Shutemov 		info->alloced += 1 << compound_order(page);
1839800d8c63SKirill A. Shutemov 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
184054af6042SHugh Dickins 		shmem_recalc_inode(inode);
18414595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
18421635f6a7SHugh Dickins 		alloced = true;
184354af6042SHugh Dickins 
1844779750d2SKirill A. Shutemov 		if (PageTransHuge(page) &&
1845779750d2SKirill A. Shutemov 				DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1846779750d2SKirill A. Shutemov 				hindex + HPAGE_PMD_NR - 1) {
1847779750d2SKirill A. Shutemov 			/*
1848779750d2SKirill A. Shutemov 			 * Part of the huge page is beyond i_size: subject
1849779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1850779750d2SKirill A. Shutemov 			 */
1851779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1852d041353dSCong Wang 			/*
1853d041353dSCong Wang 			 * _careful to defend against unlocked access to
1854d041353dSCong Wang 			 * ->shrink_list in shmem_unused_huge_shrink()
1855d041353dSCong Wang 			 */
1856d041353dSCong Wang 			if (list_empty_careful(&info->shrinklist)) {
1857779750d2SKirill A. Shutemov 				list_add_tail(&info->shrinklist,
1858779750d2SKirill A. Shutemov 						&sbinfo->shrinklist);
1859779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len++;
1860779750d2SKirill A. Shutemov 			}
1861779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1862779750d2SKirill A. Shutemov 		}
1863779750d2SKirill A. Shutemov 
1864ec9516fbSHugh Dickins 		/*
18651635f6a7SHugh Dickins 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
18661635f6a7SHugh Dickins 		 */
18671635f6a7SHugh Dickins 		if (sgp == SGP_FALLOC)
18681635f6a7SHugh Dickins 			sgp = SGP_WRITE;
18691635f6a7SHugh Dickins clear:
18701635f6a7SHugh Dickins 		/*
18711635f6a7SHugh Dickins 		 * Let SGP_WRITE caller clear ends if write does not fill page;
18721635f6a7SHugh Dickins 		 * but SGP_FALLOC on a page fallocated earlier must initialize
18731635f6a7SHugh Dickins 		 * it now, lest undo on failure cancel our earlier guarantee.
1874ec9516fbSHugh Dickins 		 */
1875800d8c63SKirill A. Shutemov 		if (sgp != SGP_WRITE && !PageUptodate(page)) {
1876800d8c63SKirill A. Shutemov 			struct page *head = compound_head(page);
1877800d8c63SKirill A. Shutemov 			int i;
1878800d8c63SKirill A. Shutemov 
1879800d8c63SKirill A. Shutemov 			for (i = 0; i < (1 << compound_order(head)); i++) {
1880800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
1881800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
1882800d8c63SKirill A. Shutemov 			}
1883800d8c63SKirill A. Shutemov 			SetPageUptodate(head);
1884ec9516fbSHugh Dickins 		}
18851da177e4SLinus Torvalds 	}
1886bde05d1cSHugh Dickins 
188754af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
188875edd345SHugh Dickins 	if (sgp <= SGP_CACHE &&
188909cbfeafSKirill A. Shutemov 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1890267a4c76SHugh Dickins 		if (alloced) {
1891267a4c76SHugh Dickins 			ClearPageDirty(page);
1892267a4c76SHugh Dickins 			delete_from_page_cache(page);
18934595ef88SKirill A. Shutemov 			spin_lock_irq(&info->lock);
1894267a4c76SHugh Dickins 			shmem_recalc_inode(inode);
18954595ef88SKirill A. Shutemov 			spin_unlock_irq(&info->lock);
1896267a4c76SHugh Dickins 		}
189754af6042SHugh Dickins 		error = -EINVAL;
1898267a4c76SHugh Dickins 		goto unlock;
1899ff36b801SShaohua Li 	}
1900800d8c63SKirill A. Shutemov 	*pagep = page + index - hindex;
190154af6042SHugh Dickins 	return 0;
1902d00806b1SNick Piggin 
1903d0217ac0SNick Piggin 	/*
190454af6042SHugh Dickins 	 * Error recovery.
19051da177e4SLinus Torvalds 	 */
190654af6042SHugh Dickins unacct:
19070f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1908800d8c63SKirill A. Shutemov 
1909800d8c63SKirill A. Shutemov 	if (PageTransHuge(page)) {
1910800d8c63SKirill A. Shutemov 		unlock_page(page);
1911800d8c63SKirill A. Shutemov 		put_page(page);
1912800d8c63SKirill A. Shutemov 		goto alloc_nohuge;
1913800d8c63SKirill A. Shutemov 	}
191454af6042SHugh Dickins failed:
1915267a4c76SHugh Dickins 	if (swap.val && !shmem_confirm_swap(mapping, index, swap))
191654af6042SHugh Dickins 		error = -EEXIST;
1917d1899228SHugh Dickins unlock:
191827ab7006SHugh Dickins 	if (page) {
191954af6042SHugh Dickins 		unlock_page(page);
192009cbfeafSKirill A. Shutemov 		put_page(page);
192154af6042SHugh Dickins 	}
192254af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
19234595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
192454af6042SHugh Dickins 		shmem_recalc_inode(inode);
19254595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
19261da177e4SLinus Torvalds 		goto repeat;
1927d8dc74f2SAdrian Bunk 	}
1928d1899228SHugh Dickins 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
192954af6042SHugh Dickins 		goto repeat;
193054af6042SHugh Dickins 	return error;
19311da177e4SLinus Torvalds }
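
/*
 * Hedged userspace illustration (not part of this file) of the
 * "sgp == SGP_READ && !swap.val" fast path above, assuming the usual
 * tmpfs mount at /dev/shm: reading a hole hands back no page at all,
 * the caller copies zeros, and the sparse file stays unallocated.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT, 0600);
 *
 *		ftruncate(fd, 1 << 20);		// 1MB hole: nothing allocated
 *		read(fd, buf, sizeof(buf));	// copies zeros: still nothing
 *		return 0;
 *	}
 */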
19321da177e4SLinus Torvalds 
193310d20bd2SLinus Torvalds /*
193410d20bd2SLinus Torvalds  * This is like autoremove_wake_function, but it removes the wait queue
193510d20bd2SLinus Torvalds  * entry unconditionally - even if something else had already woken the
193610d20bd2SLinus Torvalds  * target.
193710d20bd2SLinus Torvalds  */
1938ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
193910d20bd2SLinus Torvalds {
194010d20bd2SLinus Torvalds 	int ret = default_wake_function(wait, mode, sync, key);
19412055da97SIngo Molnar 	list_del_init(&wait->entry);
194210d20bd2SLinus Torvalds 	return ret;
194310d20bd2SLinus Torvalds }
194410d20bd2SLinus Torvalds 
194520acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf)
19461da177e4SLinus Torvalds {
194711bac800SDave Jiang 	struct vm_area_struct *vma = vmf->vma;
1948496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
19499e18eb29SAndres Lagar-Cavilla 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1950657e3038SKirill A. Shutemov 	enum sgp_type sgp;
195120acce67SSouptick Joarder 	int err;
195220acce67SSouptick Joarder 	vm_fault_t ret = VM_FAULT_LOCKED;
19531da177e4SLinus Torvalds 
1954f00cdc6dSHugh Dickins 	/*
1955f00cdc6dSHugh Dickins 	 * Trinity finds that probing a hole which tmpfs is punching can
1956f00cdc6dSHugh Dickins 	 * prevent the hole-punch from ever completing: which in turn
1957f00cdc6dSHugh Dickins 	 * locks writers out with its hold on i_mutex.  So refrain from
19588e205f77SHugh Dickins 	 * faulting pages into the hole while it's being punched.  Although
19598e205f77SHugh Dickins 	 * shmem_undo_range() does remove the additions, it may be unable to
19608e205f77SHugh Dickins 	 * keep up, as each new page needs its own unmap_mapping_range() call,
19618e205f77SHugh Dickins 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
19628e205f77SHugh Dickins 	 *
19638e205f77SHugh Dickins 	 * It does not matter if we sometimes reach this check just before the
19648e205f77SHugh Dickins 	 * hole-punch begins, so that one fault then races with the punch:
19658e205f77SHugh Dickins 	 * we just need to make racing faults a rare case.
19668e205f77SHugh Dickins 	 *
19678e205f77SHugh Dickins 	 * The implementation below would be much simpler if we just used a
19688e205f77SHugh Dickins 	 * standard mutex or completion: but we cannot take i_mutex in fault,
19698e205f77SHugh Dickins 	 * and bloating every shmem inode for this unlikely case would be sad.
1970f00cdc6dSHugh Dickins 	 */
1971f00cdc6dSHugh Dickins 	if (unlikely(inode->i_private)) {
1972f00cdc6dSHugh Dickins 		struct shmem_falloc *shmem_falloc;
1973f00cdc6dSHugh Dickins 
1974f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
1975f00cdc6dSHugh Dickins 		shmem_falloc = inode->i_private;
19768e205f77SHugh Dickins 		if (shmem_falloc &&
19778e205f77SHugh Dickins 		    shmem_falloc->waitq &&
19788e205f77SHugh Dickins 		    vmf->pgoff >= shmem_falloc->start &&
19798e205f77SHugh Dickins 		    vmf->pgoff < shmem_falloc->next) {
19808e205f77SHugh Dickins 			wait_queue_head_t *shmem_falloc_waitq;
198110d20bd2SLinus Torvalds 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
19828e205f77SHugh Dickins 
19838e205f77SHugh Dickins 			ret = VM_FAULT_NOPAGE;
1984f00cdc6dSHugh Dickins 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1985f00cdc6dSHugh Dickins 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
19868e205f77SHugh Dickins 				/* It's polite to up mmap_sem if we can */
1987f00cdc6dSHugh Dickins 				up_read(&vma->vm_mm->mmap_sem);
19888e205f77SHugh Dickins 				ret = VM_FAULT_RETRY;
1989f00cdc6dSHugh Dickins 			}
19908e205f77SHugh Dickins 
19918e205f77SHugh Dickins 			shmem_falloc_waitq = shmem_falloc->waitq;
19928e205f77SHugh Dickins 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
19938e205f77SHugh Dickins 					TASK_UNINTERRUPTIBLE);
19948e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
19958e205f77SHugh Dickins 			schedule();
19968e205f77SHugh Dickins 
19978e205f77SHugh Dickins 			/*
19988e205f77SHugh Dickins 			 * shmem_falloc_waitq points into the shmem_fallocate()
19998e205f77SHugh Dickins 			 * stack of the hole-punching task: shmem_falloc_waitq
20008e205f77SHugh Dickins 			 * is usually invalid by the time we reach here, but
20018e205f77SHugh Dickins 			 * finish_wait() does not dereference it in that case;
20028e205f77SHugh Dickins 			 * though i_lock needed lest racing with wake_up_all().
20038e205f77SHugh Dickins 			 */
20048e205f77SHugh Dickins 			spin_lock(&inode->i_lock);
20058e205f77SHugh Dickins 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
20068e205f77SHugh Dickins 			spin_unlock(&inode->i_lock);
20078e205f77SHugh Dickins 			return ret;
2008f00cdc6dSHugh Dickins 		}
20098e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
2010f00cdc6dSHugh Dickins 	}
2011f00cdc6dSHugh Dickins 
2012657e3038SKirill A. Shutemov 	sgp = SGP_CACHE;
201318600332SMichal Hocko 
201418600332SMichal Hocko 	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
201518600332SMichal Hocko 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2016657e3038SKirill A. Shutemov 		sgp = SGP_NOHUGE;
201718600332SMichal Hocko 	else if (vma->vm_flags & VM_HUGEPAGE)
201818600332SMichal Hocko 		sgp = SGP_HUGE;
2019657e3038SKirill A. Shutemov 
202020acce67SSouptick Joarder 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2021cfda0526SMike Rapoport 				  gfp, vma, vmf, &ret);
202220acce67SSouptick Joarder 	if (err)
202320acce67SSouptick Joarder 		return vmf_error(err);
202468da9f05SHugh Dickins 	return ret;
20251da177e4SLinus Torvalds }
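
/*
 * Hedged userspace sketch of the hole-punch that the waitq dance above
 * guards against racing with:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * While that call is in flight, a fault on a page inside the punched
 * range sleeps on shmem_falloc->waitq instead of endlessly
 * re-instantiating pages the punch is trying to remove.
 */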
20261da177e4SLinus Torvalds 
2027c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
2028c01d5b30SHugh Dickins 				      unsigned long uaddr, unsigned long len,
2029c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
2030c01d5b30SHugh Dickins {
2031c01d5b30SHugh Dickins 	unsigned long (*get_area)(struct file *,
2032c01d5b30SHugh Dickins 		unsigned long, unsigned long, unsigned long, unsigned long);
2033c01d5b30SHugh Dickins 	unsigned long addr;
2034c01d5b30SHugh Dickins 	unsigned long offset;
2035c01d5b30SHugh Dickins 	unsigned long inflated_len;
2036c01d5b30SHugh Dickins 	unsigned long inflated_addr;
2037c01d5b30SHugh Dickins 	unsigned long inflated_offset;
2038c01d5b30SHugh Dickins 
2039c01d5b30SHugh Dickins 	if (len > TASK_SIZE)
2040c01d5b30SHugh Dickins 		return -ENOMEM;
2041c01d5b30SHugh Dickins 
2042c01d5b30SHugh Dickins 	get_area = current->mm->get_unmapped_area;
2043c01d5b30SHugh Dickins 	addr = get_area(file, uaddr, len, pgoff, flags);
2044c01d5b30SHugh Dickins 
2045e496cf3dSKirill A. Shutemov 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2046c01d5b30SHugh Dickins 		return addr;
2047c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(addr))
2048c01d5b30SHugh Dickins 		return addr;
2049c01d5b30SHugh Dickins 	if (addr & ~PAGE_MASK)
2050c01d5b30SHugh Dickins 		return addr;
2051c01d5b30SHugh Dickins 	if (addr > TASK_SIZE - len)
2052c01d5b30SHugh Dickins 		return addr;
2053c01d5b30SHugh Dickins 
2054c01d5b30SHugh Dickins 	if (shmem_huge == SHMEM_HUGE_DENY)
2055c01d5b30SHugh Dickins 		return addr;
2056c01d5b30SHugh Dickins 	if (len < HPAGE_PMD_SIZE)
2057c01d5b30SHugh Dickins 		return addr;
2058c01d5b30SHugh Dickins 	if (flags & MAP_FIXED)
2059c01d5b30SHugh Dickins 		return addr;
2060c01d5b30SHugh Dickins 	/*
2061c01d5b30SHugh Dickins 	 * Our priority is to support MAP_SHARED mapped hugely;
2062c01d5b30SHugh Dickins 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2063c01d5b30SHugh Dickins 	 * But if caller specified an address hint, respect that as before.
2064c01d5b30SHugh Dickins 	 */
2065c01d5b30SHugh Dickins 	if (uaddr)
2066c01d5b30SHugh Dickins 		return addr;
2067c01d5b30SHugh Dickins 
2068c01d5b30SHugh Dickins 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2069c01d5b30SHugh Dickins 		struct super_block *sb;
2070c01d5b30SHugh Dickins 
2071c01d5b30SHugh Dickins 		if (file) {
2072c01d5b30SHugh Dickins 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2073c01d5b30SHugh Dickins 			sb = file_inode(file)->i_sb;
2074c01d5b30SHugh Dickins 		} else {
2075c01d5b30SHugh Dickins 			/*
2076c01d5b30SHugh Dickins 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2077c01d5b30SHugh Dickins 			 * for "/dev/zero", to create a shared anonymous object.
2078c01d5b30SHugh Dickins 			 */
2079c01d5b30SHugh Dickins 			if (IS_ERR(shm_mnt))
2080c01d5b30SHugh Dickins 				return addr;
2081c01d5b30SHugh Dickins 			sb = shm_mnt->mnt_sb;
2082c01d5b30SHugh Dickins 		}
20833089bf61SToshi Kani 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2084c01d5b30SHugh Dickins 			return addr;
2085c01d5b30SHugh Dickins 	}
2086c01d5b30SHugh Dickins 
2087c01d5b30SHugh Dickins 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2088c01d5b30SHugh Dickins 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2089c01d5b30SHugh Dickins 		return addr;
2090c01d5b30SHugh Dickins 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2091c01d5b30SHugh Dickins 		return addr;
2092c01d5b30SHugh Dickins 
2093c01d5b30SHugh Dickins 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2094c01d5b30SHugh Dickins 	if (inflated_len > TASK_SIZE)
2095c01d5b30SHugh Dickins 		return addr;
2096c01d5b30SHugh Dickins 	if (inflated_len < len)
2097c01d5b30SHugh Dickins 		return addr;
2098c01d5b30SHugh Dickins 
2099c01d5b30SHugh Dickins 	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2100c01d5b30SHugh Dickins 	if (IS_ERR_VALUE(inflated_addr))
2101c01d5b30SHugh Dickins 		return addr;
2102c01d5b30SHugh Dickins 	if (inflated_addr & ~PAGE_MASK)
2103c01d5b30SHugh Dickins 		return addr;
2104c01d5b30SHugh Dickins 
2105c01d5b30SHugh Dickins 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2106c01d5b30SHugh Dickins 	inflated_addr += offset - inflated_offset;
2107c01d5b30SHugh Dickins 	if (inflated_offset > offset)
2108c01d5b30SHugh Dickins 		inflated_addr += HPAGE_PMD_SIZE;
2109c01d5b30SHugh Dickins 
2110c01d5b30SHugh Dickins 	if (inflated_addr > TASK_SIZE - len)
2111c01d5b30SHugh Dickins 		return addr;
2112c01d5b30SHugh Dickins 	return inflated_addr;
2113c01d5b30SHugh Dickins }
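/*
 * Worked example of the alignment arithmetic above (illustrative
 * values only; assumes HPAGE_PMD_SIZE == 2MB == 0x200000):
 *
 *	pgoff = 0, so offset = 0
 *	second pass: get_area(NULL, 0, inflated_len, 0, flags)
 *	             returns, say, 0x7f0012345000
 *	inflated_offset = 0x7f0012345000 & 0x1fffff = 0x145000
 *	inflated_addr  += 0 - 0x145000  ->  0x7f0012200000
 *	inflated_offset > offset, so    +=  0x200000
 *	result 0x7f0012400000: 2MB-aligned, and still inside the
 *	inflated_len window returned by the second pass, so the
 *	requested len bytes fit.
 */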
2114c01d5b30SHugh Dickins 
21151da177e4SLinus Torvalds #ifdef CONFIG_NUMA
211641ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
21171da177e4SLinus Torvalds {
2118496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
211941ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
21201da177e4SLinus Torvalds }
21211da177e4SLinus Torvalds 
2122d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2123d8dc74f2SAdrian Bunk 					  unsigned long addr)
21241da177e4SLinus Torvalds {
2125496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
212641ffe5d5SHugh Dickins 	pgoff_t index;
21271da177e4SLinus Torvalds 
212841ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
212941ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
21301da177e4SLinus Torvalds }
21311da177e4SLinus Torvalds #endif
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
21341da177e4SLinus Torvalds {
2135496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
21361da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
21371da177e4SLinus Torvalds 	int retval = -ENOMEM;
21381da177e4SLinus Torvalds 
21394595ef88SKirill A. Shutemov 	spin_lock_irq(&info->lock);
21401da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
21411da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
21421da177e4SLinus Torvalds 			goto out_nomem;
21431da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
214489e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
21451da177e4SLinus Torvalds 	}
21461da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
21471da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
21481da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
214989e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
21501da177e4SLinus Torvalds 	}
21511da177e4SLinus Torvalds 	retval = 0;
215289e004eaSLee Schermerhorn 
21531da177e4SLinus Torvalds out_nomem:
21544595ef88SKirill A. Shutemov 	spin_unlock_irq(&info->lock);
21551da177e4SLinus Torvalds 	return retval;
21561da177e4SLinus Torvalds }
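/*
 * Illustrative userspace path into shmem_lock() above (a sketch;
 * SHM_LOCK on a System V segment is the usual caller, via ipc/shm.c):
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// charge RLIMIT_MEMLOCK, mark unevictable
 *	shmctl(id, SHM_UNLOCK, NULL);	// undo both
 */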
21571da177e4SLinus Torvalds 
21589b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
21591da177e4SLinus Torvalds {
21601da177e4SLinus Torvalds 	file_accessed(file);
21611da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2162e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2163f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2164f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
2165f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
2166f3f0e1d2SKirill A. Shutemov 	}
21671da177e4SLinus Torvalds 	return 0;
21681da177e4SLinus Torvalds }
21691da177e4SLinus Torvalds 
2170454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
217109208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
21721da177e4SLinus Torvalds {
21731da177e4SLinus Torvalds 	struct inode *inode;
21741da177e4SLinus Torvalds 	struct shmem_inode_info *info;
21751da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
21761da177e4SLinus Torvalds 
21775b04c689SPavel Emelyanov 	if (shmem_reserve_inode(sb))
21781da177e4SLinus Torvalds 		return NULL;
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds 	inode = new_inode(sb);
21811da177e4SLinus Torvalds 	if (inode) {
218285fe4025SChristoph Hellwig 		inode->i_ino = get_next_ino();
2183454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
21841da177e4SLinus Torvalds 		inode->i_blocks = 0;
2185078cd827SDeepa Dinamani 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
218646c9a946SArnd Bergmann 		inode->i_generation = prandom_u32();
21871da177e4SLinus Torvalds 		info = SHMEM_I(inode);
21881da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
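		/*
		 * Note: the memset above relies on struct shmem_inode_info
		 * embedding its struct inode as the *last* member, so only
		 * the shmem-private fields in front of the freshly
		 * initialized VFS inode are zeroed.
		 */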
21891da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
219040e041a2SDavid Herrmann 		info->seals = F_SEAL_SEAL;
21910b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
2192779750d2SKirill A. Shutemov 		INIT_LIST_HEAD(&info->shrinklist);
21931da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
219438f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
219572c04902SAl Viro 		cache_no_acl(inode);
21961da177e4SLinus Torvalds 
21971da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
21981da177e4SLinus Torvalds 		default:
219939f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
22001da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
22011da177e4SLinus Torvalds 			break;
22021da177e4SLinus Torvalds 		case S_IFREG:
220314fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
22041da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
22051da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
220671fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
220771fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
22081da177e4SLinus Torvalds 			break;
22091da177e4SLinus Torvalds 		case S_IFDIR:
2210d8c76e6fSDave Hansen 			inc_nlink(inode);
22111da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
22121da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
22131da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
22141da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
22151da177e4SLinus Torvalds 			break;
22161da177e4SLinus Torvalds 		case S_IFLNK:
22171da177e4SLinus Torvalds 			/*
22181da177e4SLinus Torvalds 			 * Must not load anything into the rbtree:
22191da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
22201da177e4SLinus Torvalds 			 */
222171fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
22221da177e4SLinus Torvalds 			break;
22231da177e4SLinus Torvalds 		}
2224b45d71fbSJoel Fernandes (Google) 
2225b45d71fbSJoel Fernandes (Google) 		lockdep_annotate_inode_mutex_key(inode);
22265b04c689SPavel Emelyanov 	} else
22275b04c689SPavel Emelyanov 		shmem_free_inode(sb);
22281da177e4SLinus Torvalds 	return inode;
22291da177e4SLinus Torvalds }
22301da177e4SLinus Torvalds 
22310cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping)
22320cd6144aSJohannes Weiner {
2233f8005451SHugh Dickins 	return mapping->a_ops == &shmem_aops;
22340cd6144aSJohannes Weiner }
22350cd6144aSJohannes Weiner 
22368d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
22374c27fe4cSMike Rapoport 				  pmd_t *dst_pmd,
22384c27fe4cSMike Rapoport 				  struct vm_area_struct *dst_vma,
22394c27fe4cSMike Rapoport 				  unsigned long dst_addr,
22404c27fe4cSMike Rapoport 				  unsigned long src_addr,
22418d103963SMike Rapoport 				  bool zeropage,
22424c27fe4cSMike Rapoport 				  struct page **pagep)
22434c27fe4cSMike Rapoport {
22444c27fe4cSMike Rapoport 	struct inode *inode = file_inode(dst_vma->vm_file);
22454c27fe4cSMike Rapoport 	struct shmem_inode_info *info = SHMEM_I(inode);
22464c27fe4cSMike Rapoport 	struct address_space *mapping = inode->i_mapping;
22474c27fe4cSMike Rapoport 	gfp_t gfp = mapping_gfp_mask(mapping);
22484c27fe4cSMike Rapoport 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
22494c27fe4cSMike Rapoport 	struct mem_cgroup *memcg;
22504c27fe4cSMike Rapoport 	spinlock_t *ptl;
22514c27fe4cSMike Rapoport 	void *page_kaddr;
22524c27fe4cSMike Rapoport 	struct page *page;
22534c27fe4cSMike Rapoport 	pte_t _dst_pte, *dst_pte;
22544c27fe4cSMike Rapoport 	int ret;
22554c27fe4cSMike Rapoport 
22564c27fe4cSMike Rapoport 	ret = -ENOMEM;
22570f079694SMike Rapoport 	if (!shmem_inode_acct_block(inode, 1))
22584c27fe4cSMike Rapoport 		goto out;
22594c27fe4cSMike Rapoport 
2260cb658a45SAndrea Arcangeli 	if (!*pagep) {
22614c27fe4cSMike Rapoport 		page = shmem_alloc_page(gfp, info, pgoff);
22624c27fe4cSMike Rapoport 		if (!page)
22630f079694SMike Rapoport 			goto out_unacct_blocks;
22644c27fe4cSMike Rapoport 
22658d103963SMike Rapoport 		if (!zeropage) {	/* mcopy_atomic */
22664c27fe4cSMike Rapoport 			page_kaddr = kmap_atomic(page);
22678d103963SMike Rapoport 			ret = copy_from_user(page_kaddr,
22688d103963SMike Rapoport 					     (const void __user *)src_addr,
22694c27fe4cSMike Rapoport 					     PAGE_SIZE);
22704c27fe4cSMike Rapoport 			kunmap_atomic(page_kaddr);
22714c27fe4cSMike Rapoport 
22724c27fe4cSMike Rapoport 		/* fall back to copy_from_user outside mmap_sem */
22734c27fe4cSMike Rapoport 			if (unlikely(ret)) {
22744c27fe4cSMike Rapoport 				*pagep = page;
22750f079694SMike Rapoport 				shmem_inode_unacct_blocks(inode, 1);
22764c27fe4cSMike Rapoport 				/* don't free the page */
22774c27fe4cSMike Rapoport 				return -EFAULT;
22784c27fe4cSMike Rapoport 			}
22798d103963SMike Rapoport 		} else {		/* mfill_zeropage_atomic */
22808d103963SMike Rapoport 			clear_highpage(page);
22818d103963SMike Rapoport 		}
22824c27fe4cSMike Rapoport 	} else {
22834c27fe4cSMike Rapoport 		page = *pagep;
22844c27fe4cSMike Rapoport 		*pagep = NULL;
22854c27fe4cSMike Rapoport 	}
22864c27fe4cSMike Rapoport 
22879cc90c66SAndrea Arcangeli 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
22889cc90c66SAndrea Arcangeli 	__SetPageLocked(page);
22899cc90c66SAndrea Arcangeli 	__SetPageSwapBacked(page);
2290a425d358SAndrea Arcangeli 	__SetPageUptodate(page);
22919cc90c66SAndrea Arcangeli 
22922cf85583STejun Heo 	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
22934c27fe4cSMike Rapoport 	if (ret)
22944c27fe4cSMike Rapoport 		goto out_release;
22954c27fe4cSMike Rapoport 
22964c27fe4cSMike Rapoport 	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
22974c27fe4cSMike Rapoport 	if (!ret) {
22984c27fe4cSMike Rapoport 		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
22994c27fe4cSMike Rapoport 		radix_tree_preload_end();
23004c27fe4cSMike Rapoport 	}
23014c27fe4cSMike Rapoport 	if (ret)
23024c27fe4cSMike Rapoport 		goto out_release_uncharge;
23034c27fe4cSMike Rapoport 
23044c27fe4cSMike Rapoport 	mem_cgroup_commit_charge(page, memcg, false, false);
23054c27fe4cSMike Rapoport 
23064c27fe4cSMike Rapoport 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
23074c27fe4cSMike Rapoport 	if (dst_vma->vm_flags & VM_WRITE)
23084c27fe4cSMike Rapoport 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
23094c27fe4cSMike Rapoport 
23104c27fe4cSMike Rapoport 	ret = -EEXIST;
23114c27fe4cSMike Rapoport 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
23124c27fe4cSMike Rapoport 	if (!pte_none(*dst_pte))
23134c27fe4cSMike Rapoport 		goto out_release_uncharge_unlock;
23144c27fe4cSMike Rapoport 
23154c27fe4cSMike Rapoport 	lru_cache_add_anon(page);
23164c27fe4cSMike Rapoport 
23174c27fe4cSMike Rapoport 	spin_lock(&info->lock);
23184c27fe4cSMike Rapoport 	info->alloced++;
23194c27fe4cSMike Rapoport 	inode->i_blocks += BLOCKS_PER_PAGE;
23204c27fe4cSMike Rapoport 	shmem_recalc_inode(inode);
23214c27fe4cSMike Rapoport 	spin_unlock(&info->lock);
23224c27fe4cSMike Rapoport 
23234c27fe4cSMike Rapoport 	inc_mm_counter(dst_mm, mm_counter_file(page));
23244c27fe4cSMike Rapoport 	page_add_file_rmap(page, false);
23254c27fe4cSMike Rapoport 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
23264c27fe4cSMike Rapoport 
23274c27fe4cSMike Rapoport 	/* No need to invalidate - it was non-present before */
23284c27fe4cSMike Rapoport 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
23294c27fe4cSMike Rapoport 	unlock_page(page);
23304c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
23314c27fe4cSMike Rapoport 	ret = 0;
23324c27fe4cSMike Rapoport out:
23334c27fe4cSMike Rapoport 	return ret;
23344c27fe4cSMike Rapoport out_release_uncharge_unlock:
23354c27fe4cSMike Rapoport 	pte_unmap_unlock(dst_pte, ptl);
23364c27fe4cSMike Rapoport out_release_uncharge:
23374c27fe4cSMike Rapoport 	mem_cgroup_cancel_charge(page, memcg, false);
23384c27fe4cSMike Rapoport out_release:
23399cc90c66SAndrea Arcangeli 	unlock_page(page);
23404c27fe4cSMike Rapoport 	put_page(page);
23414c27fe4cSMike Rapoport out_unacct_blocks:
23420f079694SMike Rapoport 	shmem_inode_unacct_blocks(inode, 1);
23434c27fe4cSMike Rapoport 	goto out;
23444c27fe4cSMike Rapoport }
23454c27fe4cSMike Rapoport 
23468d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
23478d103963SMike Rapoport 			   pmd_t *dst_pmd,
23488d103963SMike Rapoport 			   struct vm_area_struct *dst_vma,
23498d103963SMike Rapoport 			   unsigned long dst_addr,
23508d103963SMike Rapoport 			   unsigned long src_addr,
23518d103963SMike Rapoport 			   struct page **pagep)
23528d103963SMike Rapoport {
23538d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
23548d103963SMike Rapoport 				      dst_addr, src_addr, false, pagep);
23558d103963SMike Rapoport }
23568d103963SMike Rapoport 
23578d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
23588d103963SMike Rapoport 			     pmd_t *dst_pmd,
23598d103963SMike Rapoport 			     struct vm_area_struct *dst_vma,
23608d103963SMike Rapoport 			     unsigned long dst_addr)
23618d103963SMike Rapoport {
23628d103963SMike Rapoport 	struct page *page = NULL;
23638d103963SMike Rapoport 
23648d103963SMike Rapoport 	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
23658d103963SMike Rapoport 				      dst_addr, 0, true, &page);
23668d103963SMike Rapoport }
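/*
 * Userspace counterpart of the two helpers above (a sketch; assumes a
 * userfaultfd already registered over a shmem/memfd mapping, where
 * UFFDIO_COPY resolves a reported fault through shmem_mcopy_atomic_pte()
 * and UFFDIO_ZEROPAGE through shmem_mfill_zeropage_pte(); fault_addr,
 * src_buf and uffd are hypothetical variables):
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(PAGE_SIZE - 1),
 *		.src  = (unsigned long)src_buf,
 *		.len  = PAGE_SIZE,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */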
23678d103963SMike Rapoport 
23681da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
236992e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
237069f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
23711da177e4SLinus Torvalds 
23726d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
23736d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
23746d9d88d0SJarkko Sakkinen #else
23756d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
23766d9d88d0SJarkko Sakkinen #endif
23776d9d88d0SJarkko Sakkinen 
23781da177e4SLinus Torvalds static int
2379800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
2380800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2381800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
23821da177e4SLinus Torvalds {
2383800d15a5SNick Piggin 	struct inode *inode = mapping->host;
238440e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
238509cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
238640e041a2SDavid Herrmann 
238740e041a2SDavid Herrmann 	/* i_mutex is held by caller */
23883f472cc9SSteven Rostedt (VMware) 	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
238940e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE)
239040e041a2SDavid Herrmann 			return -EPERM;
239140e041a2SDavid Herrmann 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
239240e041a2SDavid Herrmann 			return -EPERM;
239340e041a2SDavid Herrmann 	}
239440e041a2SDavid Herrmann 
23959e18eb29SAndres Lagar-Cavilla 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2396800d15a5SNick Piggin }
2397800d15a5SNick Piggin 
2398800d15a5SNick Piggin static int
2399800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
2400800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2401800d15a5SNick Piggin 			struct page *page, void *fsdata)
2402800d15a5SNick Piggin {
2403800d15a5SNick Piggin 	struct inode *inode = mapping->host;
2404800d15a5SNick Piggin 
2405800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
2406800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
2407800d15a5SNick Piggin 
2408ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
2409800d8c63SKirill A. Shutemov 		struct page *head = compound_head(page);
2410800d8c63SKirill A. Shutemov 		if (PageTransCompound(page)) {
2411800d8c63SKirill A. Shutemov 			int i;
2412800d8c63SKirill A. Shutemov 
2413800d8c63SKirill A. Shutemov 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2414800d8c63SKirill A. Shutemov 				if (head + i == page)
2415800d8c63SKirill A. Shutemov 					continue;
2416800d8c63SKirill A. Shutemov 				clear_highpage(head + i);
2417800d8c63SKirill A. Shutemov 				flush_dcache_page(head + i);
2418800d8c63SKirill A. Shutemov 			}
2419800d8c63SKirill A. Shutemov 		}
242009cbfeafSKirill A. Shutemov 		if (copied < PAGE_SIZE) {
242109cbfeafSKirill A. Shutemov 			unsigned from = pos & (PAGE_SIZE - 1);
2422ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
242309cbfeafSKirill A. Shutemov 					from + copied, PAGE_SIZE);
2424ec9516fbSHugh Dickins 		}
2425800d8c63SKirill A. Shutemov 		SetPageUptodate(head);
2426ec9516fbSHugh Dickins 	}
2427d3602444SHugh Dickins 	set_page_dirty(page);
24286746aff7SWu Fengguang 	unlock_page(page);
242909cbfeafSKirill A. Shutemov 	put_page(page);
2430d3602444SHugh Dickins 
2431800d15a5SNick Piggin 	return copied;
24321da177e4SLinus Torvalds }
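/*
 * Taken together: shmem_write_begin() enforces the F_SEAL_WRITE and
 * F_SEAL_GROW seals and pins a page with SGP_WRITE; shmem_write_end()
 * then grows i_size if needed and, for a short copy into a page that
 * was never read (!PageUptodate), zeroes the uncopied regions (and any
 * sibling subpages of a huge page) before marking it uptodate.
 */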
24331da177e4SLinus Torvalds 
24342ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
24351da177e4SLinus Torvalds {
24366e58e79dSAl Viro 	struct file *file = iocb->ki_filp;
24376e58e79dSAl Viro 	struct inode *inode = file_inode(file);
24381da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
243941ffe5d5SHugh Dickins 	pgoff_t index;
244041ffe5d5SHugh Dickins 	unsigned long offset;
2441a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
2442f7c1d074SGeert Uytterhoeven 	int error = 0;
2443cb66a7a1SAl Viro 	ssize_t retval = 0;
24446e58e79dSAl Viro 	loff_t *ppos = &iocb->ki_pos;
2445a0ee5ec5SHugh Dickins 
2446a0ee5ec5SHugh Dickins 	/*
2447a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
2448a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
2449a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2450a0ee5ec5SHugh Dickins 	 */
2451777eda2cSAl Viro 	if (!iter_is_iovec(to))
245275edd345SHugh Dickins 		sgp = SGP_CACHE;
24531da177e4SLinus Torvalds 
245409cbfeafSKirill A. Shutemov 	index = *ppos >> PAGE_SHIFT;
245509cbfeafSKirill A. Shutemov 	offset = *ppos & ~PAGE_MASK;
24561da177e4SLinus Torvalds 
24571da177e4SLinus Torvalds 	for (;;) {
24581da177e4SLinus Torvalds 		struct page *page = NULL;
245941ffe5d5SHugh Dickins 		pgoff_t end_index;
246041ffe5d5SHugh Dickins 		unsigned long nr, ret;
24611da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
24621da177e4SLinus Torvalds 
246309cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
24641da177e4SLinus Torvalds 		if (index > end_index)
24651da177e4SLinus Torvalds 			break;
24661da177e4SLinus Torvalds 		if (index == end_index) {
246709cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
24681da177e4SLinus Torvalds 			if (nr <= offset)
24691da177e4SLinus Torvalds 				break;
24701da177e4SLinus Torvalds 		}
24711da177e4SLinus Torvalds 
24729e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, index, &page, sgp);
24736e58e79dSAl Viro 		if (error) {
24746e58e79dSAl Viro 			if (error == -EINVAL)
24756e58e79dSAl Viro 				error = 0;
24761da177e4SLinus Torvalds 			break;
24771da177e4SLinus Torvalds 		}
247875edd345SHugh Dickins 		if (page) {
247975edd345SHugh Dickins 			if (sgp == SGP_CACHE)
248075edd345SHugh Dickins 				set_page_dirty(page);
2481d3602444SHugh Dickins 			unlock_page(page);
248275edd345SHugh Dickins 		}
24831da177e4SLinus Torvalds 
24841da177e4SLinus Torvalds 		/*
24851da177e4SLinus Torvalds 		 * We must re-evaluate i_size after getting the page, since reads
24861b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate
24871da177e4SLinus Torvalds 		 */
248809cbfeafSKirill A. Shutemov 		nr = PAGE_SIZE;
24891da177e4SLinus Torvalds 		i_size = i_size_read(inode);
249009cbfeafSKirill A. Shutemov 		end_index = i_size >> PAGE_SHIFT;
24911da177e4SLinus Torvalds 		if (index == end_index) {
249209cbfeafSKirill A. Shutemov 			nr = i_size & ~PAGE_MASK;
24931da177e4SLinus Torvalds 			if (nr <= offset) {
24941da177e4SLinus Torvalds 				if (page)
249509cbfeafSKirill A. Shutemov 					put_page(page);
24961da177e4SLinus Torvalds 				break;
24971da177e4SLinus Torvalds 			}
24981da177e4SLinus Torvalds 		}
24991da177e4SLinus Torvalds 		nr -= offset;
25001da177e4SLinus Torvalds 
25011da177e4SLinus Torvalds 		if (page) {
25021da177e4SLinus Torvalds 			/*
25031da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
25041da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
25051da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
25061da177e4SLinus Torvalds 			 */
25071da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
25081da177e4SLinus Torvalds 				flush_dcache_page(page);
25091da177e4SLinus Torvalds 			/*
25101da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
25111da177e4SLinus Torvalds 			 */
25121da177e4SLinus Torvalds 			if (!offset)
25131da177e4SLinus Torvalds 				mark_page_accessed(page);
2514b5810039SNick Piggin 		} else {
25151da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
251609cbfeafSKirill A. Shutemov 			get_page(page);
2517b5810039SNick Piggin 		}
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds 		/*
25201da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
25211da177e4SLinus Torvalds 		 * now we can copy it to user space...
25221da177e4SLinus Torvalds 		 */
25232ba5bbedSAl Viro 		ret = copy_page_to_iter(page, offset, nr, to);
25246e58e79dSAl Viro 		retval += ret;
25251da177e4SLinus Torvalds 		offset += ret;
252609cbfeafSKirill A. Shutemov 		index += offset >> PAGE_SHIFT;
252709cbfeafSKirill A. Shutemov 		offset &= ~PAGE_MASK;
25281da177e4SLinus Torvalds 
252909cbfeafSKirill A. Shutemov 		put_page(page);
25302ba5bbedSAl Viro 		if (!iov_iter_count(to))
25311da177e4SLinus Torvalds 			break;
25326e58e79dSAl Viro 		if (ret < nr) {
25336e58e79dSAl Viro 			error = -EFAULT;
25346e58e79dSAl Viro 			break;
25356e58e79dSAl Viro 		}
25361da177e4SLinus Torvalds 		cond_resched();
25371da177e4SLinus Torvalds 	}
25381da177e4SLinus Torvalds 
253909cbfeafSKirill A. Shutemov 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
25406e58e79dSAl Viro 	file_accessed(file);
25416e58e79dSAl Viro 	return retval ? retval : error;
25421da177e4SLinus Torvalds }
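/*
 * Note on holes: with SGP_READ, shmem_getpage() returns no page for an
 * unallocated index, and the copy above falls back to ZERO_PAGE(0); the
 * non-iovec (stacking) case uses SGP_CACHE instead, so holes really are
 * allocated and dirtied, subject to the max_blocks accounting.
 */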
25431da177e4SLinus Torvalds 
2544220f2ac9SHugh Dickins /*
2545220f2ac9SHugh Dickins  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
2546220f2ac9SHugh Dickins  */
2547220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2548965c8e59SAndrew Morton 				    pgoff_t index, pgoff_t end, int whence)
2549220f2ac9SHugh Dickins {
2550220f2ac9SHugh Dickins 	struct page *page;
2551220f2ac9SHugh Dickins 	struct pagevec pvec;
2552220f2ac9SHugh Dickins 	pgoff_t indices[PAGEVEC_SIZE];
2553220f2ac9SHugh Dickins 	bool done = false;
2554220f2ac9SHugh Dickins 	int i;
2555220f2ac9SHugh Dickins 
255686679820SMel Gorman 	pagevec_init(&pvec);
2557220f2ac9SHugh Dickins 	pvec.nr = 1;		/* start small: we may be there already */
2558220f2ac9SHugh Dickins 	while (!done) {
25590cd6144aSJohannes Weiner 		pvec.nr = find_get_entries(mapping, index,
2560220f2ac9SHugh Dickins 					pvec.nr, pvec.pages, indices);
2561220f2ac9SHugh Dickins 		if (!pvec.nr) {
2562965c8e59SAndrew Morton 			if (whence == SEEK_DATA)
2563220f2ac9SHugh Dickins 				index = end;
2564220f2ac9SHugh Dickins 			break;
2565220f2ac9SHugh Dickins 		}
2566220f2ac9SHugh Dickins 		for (i = 0; i < pvec.nr; i++, index++) {
2567220f2ac9SHugh Dickins 			if (index < indices[i]) {
2568965c8e59SAndrew Morton 				if (whence == SEEK_HOLE) {
2569220f2ac9SHugh Dickins 					done = true;
2570220f2ac9SHugh Dickins 					break;
2571220f2ac9SHugh Dickins 				}
2572220f2ac9SHugh Dickins 				index = indices[i];
2573220f2ac9SHugh Dickins 			}
2574220f2ac9SHugh Dickins 			page = pvec.pages[i];
25753159f943SMatthew Wilcox 			if (page && !xa_is_value(page)) {
2576220f2ac9SHugh Dickins 				if (!PageUptodate(page))
2577220f2ac9SHugh Dickins 					page = NULL;
2578220f2ac9SHugh Dickins 			}
2579220f2ac9SHugh Dickins 			if (index >= end ||
2580965c8e59SAndrew Morton 			    (page && whence == SEEK_DATA) ||
2581965c8e59SAndrew Morton 			    (!page && whence == SEEK_HOLE)) {
2582220f2ac9SHugh Dickins 				done = true;
2583220f2ac9SHugh Dickins 				break;
2584220f2ac9SHugh Dickins 			}
2585220f2ac9SHugh Dickins 		}
25860cd6144aSJohannes Weiner 		pagevec_remove_exceptionals(&pvec);
2587220f2ac9SHugh Dickins 		pagevec_release(&pvec);
2588220f2ac9SHugh Dickins 		pvec.nr = PAGEVEC_SIZE;
2589220f2ac9SHugh Dickins 		cond_resched();
2590220f2ac9SHugh Dickins 	}
2591220f2ac9SHugh Dickins 	return index;
2592220f2ac9SHugh Dickins }
2593220f2ac9SHugh Dickins 
2594965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2595220f2ac9SHugh Dickins {
2596220f2ac9SHugh Dickins 	struct address_space *mapping = file->f_mapping;
2597220f2ac9SHugh Dickins 	struct inode *inode = mapping->host;
2598220f2ac9SHugh Dickins 	pgoff_t start, end;
2599220f2ac9SHugh Dickins 	loff_t new_offset;
2600220f2ac9SHugh Dickins 
2601965c8e59SAndrew Morton 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2602965c8e59SAndrew Morton 		return generic_file_llseek_size(file, offset, whence,
2603220f2ac9SHugh Dickins 					MAX_LFS_FILESIZE, i_size_read(inode));
26045955102cSAl Viro 	inode_lock(inode);
2605220f2ac9SHugh Dickins 	/* We're holding i_mutex so we can access i_size directly */
2606220f2ac9SHugh Dickins 
2607220f2ac9SHugh Dickins 	if (offset < 0)
2608220f2ac9SHugh Dickins 		offset = -EINVAL;
2609220f2ac9SHugh Dickins 	else if (offset >= inode->i_size)
2610220f2ac9SHugh Dickins 		offset = -ENXIO;
2611220f2ac9SHugh Dickins 	else {
261209cbfeafSKirill A. Shutemov 		start = offset >> PAGE_SHIFT;
261309cbfeafSKirill A. Shutemov 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2614965c8e59SAndrew Morton 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
261509cbfeafSKirill A. Shutemov 		new_offset <<= PAGE_SHIFT;
2616220f2ac9SHugh Dickins 		if (new_offset > offset) {
2617220f2ac9SHugh Dickins 			if (new_offset < inode->i_size)
2618220f2ac9SHugh Dickins 				offset = new_offset;
2619965c8e59SAndrew Morton 			else if (whence == SEEK_DATA)
2620220f2ac9SHugh Dickins 				offset = -ENXIO;
2621220f2ac9SHugh Dickins 			else
2622220f2ac9SHugh Dickins 				offset = inode->i_size;
2623220f2ac9SHugh Dickins 		}
2624220f2ac9SHugh Dickins 	}
2625220f2ac9SHugh Dickins 
2626387aae6fSHugh Dickins 	if (offset >= 0)
262746a1c2c7SJie Liu 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
26285955102cSAl Viro 	inode_unlock(inode);
2629220f2ac9SHugh Dickins 	return offset;
2630220f2ac9SHugh Dickins }
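/*
 * Illustrative userspace view of the above (a sketch; offsets are
 * hypothetical, and results land on PAGE_SIZE boundaries since the
 * scan walks the radix_tree page by page):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first allocated page
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that data run
 *	// seeking for data at or beyond i_size fails with ENXIO
 */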
2631220f2ac9SHugh Dickins 
263283e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
263383e4fa9cSHugh Dickins 							 loff_t len)
263483e4fa9cSHugh Dickins {
2635496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
2636e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
263740e041a2SDavid Herrmann 	struct shmem_inode_info *info = SHMEM_I(inode);
26381aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
2639e2d12e22SHugh Dickins 	pgoff_t start, index, end;
2640e2d12e22SHugh Dickins 	int error;
264183e4fa9cSHugh Dickins 
264213ace4d0SHugh Dickins 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
264313ace4d0SHugh Dickins 		return -EOPNOTSUPP;
264413ace4d0SHugh Dickins 
26455955102cSAl Viro 	inode_lock(inode);
264683e4fa9cSHugh Dickins 
264783e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
264883e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
264983e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
265083e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
26518e205f77SHugh Dickins 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
265283e4fa9cSHugh Dickins 
265340e041a2SDavid Herrmann 		/* protected by i_mutex */
265440e041a2SDavid Herrmann 		if (info->seals & F_SEAL_WRITE) {
265540e041a2SDavid Herrmann 			error = -EPERM;
265640e041a2SDavid Herrmann 			goto out;
265740e041a2SDavid Herrmann 		}
265840e041a2SDavid Herrmann 
26598e205f77SHugh Dickins 		shmem_falloc.waitq = &shmem_falloc_waitq;
2660f00cdc6dSHugh Dickins 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2661f00cdc6dSHugh Dickins 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2662f00cdc6dSHugh Dickins 		spin_lock(&inode->i_lock);
2663f00cdc6dSHugh Dickins 		inode->i_private = &shmem_falloc;
2664f00cdc6dSHugh Dickins 		spin_unlock(&inode->i_lock);
2665f00cdc6dSHugh Dickins 
266683e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
266783e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
266883e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
266983e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
267083e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
26718e205f77SHugh Dickins 
26728e205f77SHugh Dickins 		spin_lock(&inode->i_lock);
26738e205f77SHugh Dickins 		inode->i_private = NULL;
26748e205f77SHugh Dickins 		wake_up_all(&shmem_falloc_waitq);
26752055da97SIngo Molnar 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
26768e205f77SHugh Dickins 		spin_unlock(&inode->i_lock);
267783e4fa9cSHugh Dickins 		error = 0;
26788e205f77SHugh Dickins 		goto out;
267983e4fa9cSHugh Dickins 	}
268083e4fa9cSHugh Dickins 
2681e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2682e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
2683e2d12e22SHugh Dickins 	if (error)
2684e2d12e22SHugh Dickins 		goto out;
2685e2d12e22SHugh Dickins 
268640e041a2SDavid Herrmann 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
268740e041a2SDavid Herrmann 		error = -EPERM;
268840e041a2SDavid Herrmann 		goto out;
268940e041a2SDavid Herrmann 	}
269040e041a2SDavid Herrmann 
269109cbfeafSKirill A. Shutemov 	start = offset >> PAGE_SHIFT;
269209cbfeafSKirill A. Shutemov 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2693e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2694e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2695e2d12e22SHugh Dickins 		error = -ENOSPC;
2696e2d12e22SHugh Dickins 		goto out;
2697e2d12e22SHugh Dickins 	}
2698e2d12e22SHugh Dickins 
26998e205f77SHugh Dickins 	shmem_falloc.waitq = NULL;
27001aac1400SHugh Dickins 	shmem_falloc.start = start;
27011aac1400SHugh Dickins 	shmem_falloc.next  = start;
27021aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
27031aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
27041aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27051aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
27061aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
27071aac1400SHugh Dickins 
2708e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
2709e2d12e22SHugh Dickins 		struct page *page;
2710e2d12e22SHugh Dickins 
2711e2d12e22SHugh Dickins 		/*
2712e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2713e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
2714e2d12e22SHugh Dickins 		 */
2715e2d12e22SHugh Dickins 		if (signal_pending(current))
2716e2d12e22SHugh Dickins 			error = -EINTR;
27171aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
27181aac1400SHugh Dickins 			error = -ENOMEM;
2719e2d12e22SHugh Dickins 		else
27209e18eb29SAndres Lagar-Cavilla 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2721e2d12e22SHugh Dickins 		if (error) {
27221635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
27237f556567SHugh Dickins 			if (index > start) {
27241635f6a7SHugh Dickins 				shmem_undo_range(inode,
272509cbfeafSKirill A. Shutemov 				    (loff_t)start << PAGE_SHIFT,
2726b9b4bb26SAnthony Romano 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
27277f556567SHugh Dickins 			}
27281aac1400SHugh Dickins 			goto undone;
2729e2d12e22SHugh Dickins 		}
2730e2d12e22SHugh Dickins 
2731e2d12e22SHugh Dickins 		/*
27321aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
27331aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
27341aac1400SHugh Dickins 		 */
27351aac1400SHugh Dickins 		shmem_falloc.next++;
27361aac1400SHugh Dickins 		if (!PageUptodate(page))
27371aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
27381aac1400SHugh Dickins 
27391aac1400SHugh Dickins 		/*
27401635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
27411635f6a7SHugh Dickins 		 * can be recognized if we need to roll back on error later.
27421635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
2743e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
2744e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
2745e2d12e22SHugh Dickins 		 */
2746e2d12e22SHugh Dickins 		set_page_dirty(page);
2747e2d12e22SHugh Dickins 		unlock_page(page);
274809cbfeafSKirill A. Shutemov 		put_page(page);
2749e2d12e22SHugh Dickins 		cond_resched();
2750e2d12e22SHugh Dickins 	}
2751e2d12e22SHugh Dickins 
2752e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2753e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
2754078cd827SDeepa Dinamani 	inode->i_ctime = current_time(inode);
27551aac1400SHugh Dickins undone:
27561aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
27571aac1400SHugh Dickins 	inode->i_private = NULL;
27581aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
2759e2d12e22SHugh Dickins out:
27605955102cSAl Viro 	inode_unlock(inode);
276183e4fa9cSHugh Dickins 	return error;
276283e4fa9cSHugh Dickins }
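/*
 * Illustrative userspace counterparts of the two modes above (a sketch;
 * KEEP_SIZE is mandatory alongside PUNCH_HOLE at the VFS level):
 *
 *	// punch a hole, keeping the file size
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 *	// preallocate; fails up front with ENOSPC if len can never fit
 *	fallocate(fd, 0, off, len);
 */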
276383e4fa9cSHugh Dickins 
2764726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
27651da177e4SLinus Torvalds {
2766726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
276909cbfeafSKirill A. Shutemov 	buf->f_bsize = PAGE_SIZE;
27701da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
27710edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
27721da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
277341ffe5d5SHugh Dickins 		buf->f_bavail =
277441ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
277541ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
27760edd73b3SHugh Dickins 	}
27770edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
27781da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
27791da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
27801da177e4SLinus Torvalds 	}
27811da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
27821da177e4SLinus Torvalds 	return 0;
27831da177e4SLinus Torvalds }
27841da177e4SLinus Torvalds 
27851da177e4SLinus Torvalds /*
27861da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done.
27871da177e4SLinus Torvalds  */
27881da177e4SLinus Torvalds static int
27891a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
27901da177e4SLinus Torvalds {
27910b0a0806SHugh Dickins 	struct inode *inode;
27921da177e4SLinus Torvalds 	int error = -ENOSPC;
27931da177e4SLinus Torvalds 
2794454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
27951da177e4SLinus Torvalds 	if (inode) {
2796feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2797feda821eSChristoph Hellwig 		if (error)
2798feda821eSChristoph Hellwig 			goto out_iput;
27992a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
28009d8f13baSMimi Zohar 						     &dentry->d_name,
28016d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
2802feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2803feda821eSChristoph Hellwig 			goto out_iput;
280437ec43cdSMimi Zohar 
2805718deb6bSAl Viro 		error = 0;
28061da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
2807078cd827SDeepa Dinamani 		dir->i_ctime = dir->i_mtime = current_time(dir);
28081da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
28091da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
28101da177e4SLinus Torvalds 	}
28111da177e4SLinus Torvalds 	return error;
2812feda821eSChristoph Hellwig out_iput:
2813feda821eSChristoph Hellwig 	iput(inode);
2814feda821eSChristoph Hellwig 	return error;
28151da177e4SLinus Torvalds }
28161da177e4SLinus Torvalds 
281760545d0dSAl Viro static int
281860545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
281960545d0dSAl Viro {
282060545d0dSAl Viro 	struct inode *inode;
282160545d0dSAl Viro 	int error = -ENOSPC;
282260545d0dSAl Viro 
282360545d0dSAl Viro 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
282460545d0dSAl Viro 	if (inode) {
282560545d0dSAl Viro 		error = security_inode_init_security(inode, dir,
282660545d0dSAl Viro 						     NULL,
282760545d0dSAl Viro 						     shmem_initxattrs, NULL);
2828feda821eSChristoph Hellwig 		if (error && error != -EOPNOTSUPP)
2829feda821eSChristoph Hellwig 			goto out_iput;
2830feda821eSChristoph Hellwig 		error = simple_acl_create(dir, inode);
2831feda821eSChristoph Hellwig 		if (error)
2832feda821eSChristoph Hellwig 			goto out_iput;
283360545d0dSAl Viro 		d_tmpfile(dentry, inode);
283460545d0dSAl Viro 	}
283560545d0dSAl Viro 	return error;
2836feda821eSChristoph Hellwig out_iput:
2837feda821eSChristoph Hellwig 	iput(inode);
2838feda821eSChristoph Hellwig 	return error;
283960545d0dSAl Viro }
284060545d0dSAl Viro 
284118bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
28421da177e4SLinus Torvalds {
28431da177e4SLinus Torvalds 	int error;
28441da177e4SLinus Torvalds 
28451da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
28461da177e4SLinus Torvalds 		return error;
2847d8c76e6fSDave Hansen 	inc_nlink(dir);
28481da177e4SLinus Torvalds 	return 0;
28491da177e4SLinus Torvalds }
28501da177e4SLinus Torvalds 
28514acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2852ebfc3b49SAl Viro 		bool excl)
28531da177e4SLinus Torvalds {
28541da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
28551da177e4SLinus Torvalds }
28561da177e4SLinus Torvalds 
28571da177e4SLinus Torvalds /*
28581da177e4SLinus Torvalds  * Link a file.
28591da177e4SLinus Torvalds  */
28601da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
28611da177e4SLinus Torvalds {
286275c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
28635b04c689SPavel Emelyanov 	int ret;
28641da177e4SLinus Torvalds 
28651da177e4SLinus Torvalds 	/*
28661da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
28671da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
28681da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
28691da177e4SLinus Torvalds 	 */
28705b04c689SPavel Emelyanov 	ret = shmem_reserve_inode(inode->i_sb);
28715b04c689SPavel Emelyanov 	if (ret)
28725b04c689SPavel Emelyanov 		goto out;
28731da177e4SLinus Torvalds 
28741da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
2875078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2876d8c76e6fSDave Hansen 	inc_nlink(inode);
28777de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
28781da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
28791da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
28805b04c689SPavel Emelyanov out:
28815b04c689SPavel Emelyanov 	return ret;
28821da177e4SLinus Torvalds }
28831da177e4SLinus Torvalds 
28841da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
28851da177e4SLinus Torvalds {
288675c3cfa8SDavid Howells 	struct inode *inode = d_inode(dentry);
28871da177e4SLinus Torvalds 
28885b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
28895b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
28901da177e4SLinus Torvalds 
28911da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
2892078cd827SDeepa Dinamani 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
28939a53c3a7SDave Hansen 	drop_nlink(inode);
28941da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
28951da177e4SLinus Torvalds 	return 0;
28961da177e4SLinus Torvalds }
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
28991da177e4SLinus Torvalds {
29001da177e4SLinus Torvalds 	if (!simple_empty(dentry))
29011da177e4SLinus Torvalds 		return -ENOTEMPTY;
29021da177e4SLinus Torvalds 
290375c3cfa8SDavid Howells 	drop_nlink(d_inode(dentry));
29049a53c3a7SDave Hansen 	drop_nlink(dir);
29051da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
29061da177e4SLinus Torvalds }
29071da177e4SLinus Torvalds 
290837456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
290937456771SMiklos Szeredi {
2910e36cb0b8SDavid Howells 	bool old_is_dir = d_is_dir(old_dentry);
2911e36cb0b8SDavid Howells 	bool new_is_dir = d_is_dir(new_dentry);
291237456771SMiklos Szeredi 
291337456771SMiklos Szeredi 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
291437456771SMiklos Szeredi 		if (old_is_dir) {
291537456771SMiklos Szeredi 			drop_nlink(old_dir);
291637456771SMiklos Szeredi 			inc_nlink(new_dir);
291737456771SMiklos Szeredi 		} else {
291837456771SMiklos Szeredi 			drop_nlink(new_dir);
291937456771SMiklos Szeredi 			inc_nlink(old_dir);
292037456771SMiklos Szeredi 		}
292137456771SMiklos Szeredi 	}
292237456771SMiklos Szeredi 	old_dir->i_ctime = old_dir->i_mtime =
292337456771SMiklos Szeredi 	new_dir->i_ctime = new_dir->i_mtime =
292475c3cfa8SDavid Howells 	d_inode(old_dentry)->i_ctime =
2925078cd827SDeepa Dinamani 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
292637456771SMiklos Szeredi 
292737456771SMiklos Szeredi 	return 0;
292837456771SMiklos Szeredi }
292937456771SMiklos Szeredi 
293046fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
293146fdb794SMiklos Szeredi {
293246fdb794SMiklos Szeredi 	struct dentry *whiteout;
293346fdb794SMiklos Szeredi 	int error;
293446fdb794SMiklos Szeredi 
293546fdb794SMiklos Szeredi 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
293646fdb794SMiklos Szeredi 	if (!whiteout)
293746fdb794SMiklos Szeredi 		return -ENOMEM;
293846fdb794SMiklos Szeredi 
293946fdb794SMiklos Szeredi 	error = shmem_mknod(old_dir, whiteout,
294046fdb794SMiklos Szeredi 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
294146fdb794SMiklos Szeredi 	dput(whiteout);
294246fdb794SMiklos Szeredi 	if (error)
294346fdb794SMiklos Szeredi 		return error;
294446fdb794SMiklos Szeredi 
294546fdb794SMiklos Szeredi 	/*
294646fdb794SMiklos Szeredi 	 * Cheat and hash the whiteout while the old dentry is still in
294746fdb794SMiklos Szeredi 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
294846fdb794SMiklos Szeredi 	 *
294946fdb794SMiklos Szeredi 	 * d_lookup() will consistently find one of them at this point;
295046fdb794SMiklos Szeredi 	 * which one it finds is not certain, but that isn't even important.
295146fdb794SMiklos Szeredi 	 */
295246fdb794SMiklos Szeredi 	d_rehash(whiteout);
295346fdb794SMiklos Szeredi 	return 0;
295446fdb794SMiklos Szeredi }
295546fdb794SMiklos Szeredi 
29561da177e4SLinus Torvalds /*
29571da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename;
29581da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
29591da177e4SLinus Torvalds  * it exists, so that the VFS layer correctly frees it when it
29601da177e4SLinus Torvalds  * gets overwritten.
29611da177e4SLinus Torvalds  */
29623b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
29631da177e4SLinus Torvalds {
296475c3cfa8SDavid Howells 	struct inode *inode = d_inode(old_dentry);
29651da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
29661da177e4SLinus Torvalds 
296746fdb794SMiklos Szeredi 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
29683b69ff51SMiklos Szeredi 		return -EINVAL;
29693b69ff51SMiklos Szeredi 
297037456771SMiklos Szeredi 	if (flags & RENAME_EXCHANGE)
297137456771SMiklos Szeredi 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
297237456771SMiklos Szeredi 
29731da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
29741da177e4SLinus Torvalds 		return -ENOTEMPTY;
29751da177e4SLinus Torvalds 
297646fdb794SMiklos Szeredi 	if (flags & RENAME_WHITEOUT) {
297746fdb794SMiklos Szeredi 		int error;
297846fdb794SMiklos Szeredi 
297946fdb794SMiklos Szeredi 		error = shmem_whiteout(old_dir, old_dentry);
298046fdb794SMiklos Szeredi 		if (error)
298146fdb794SMiklos Szeredi 			return error;
298246fdb794SMiklos Szeredi 	}
298346fdb794SMiklos Szeredi 
298475c3cfa8SDavid Howells 	if (d_really_is_positive(new_dentry)) {
29851da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
2986b928095bSMiklos Szeredi 		if (they_are_dirs) {
298775c3cfa8SDavid Howells 			drop_nlink(d_inode(new_dentry));
29889a53c3a7SDave Hansen 			drop_nlink(old_dir);
2989b928095bSMiklos Szeredi 		}
29901da177e4SLinus Torvalds 	} else if (they_are_dirs) {
29919a53c3a7SDave Hansen 		drop_nlink(old_dir);
2992d8c76e6fSDave Hansen 		inc_nlink(new_dir);
29931da177e4SLinus Torvalds 	}
29941da177e4SLinus Torvalds 
29951da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
29961da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
29971da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
29981da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
2999078cd827SDeepa Dinamani 	inode->i_ctime = current_time(old_dir);
30001da177e4SLinus Torvalds 	return 0;
30011da177e4SLinus Torvalds }
30021da177e4SLinus Torvalds 
30031da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
30041da177e4SLinus Torvalds {
30051da177e4SLinus Torvalds 	int error;
30061da177e4SLinus Torvalds 	int len;
30071da177e4SLinus Torvalds 	struct inode *inode;
30089276aad6SHugh Dickins 	struct page *page;
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds 	len = strlen(symname) + 1;
301109cbfeafSKirill A. Shutemov 	if (len > PAGE_SIZE)
30121da177e4SLinus Torvalds 		return -ENAMETOOLONG;
30131da177e4SLinus Torvalds 
30140825a6f9SJoe Perches 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
30150825a6f9SJoe Perches 				VM_NORESERVE);
30161da177e4SLinus Torvalds 	if (!inode)
30171da177e4SLinus Torvalds 		return -ENOSPC;
30181da177e4SLinus Torvalds 
30199d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
30206d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
3021570bc1c2SStephen Smalley 	if (error) {
3022570bc1c2SStephen Smalley 		if (error != -EOPNOTSUPP) {
3023570bc1c2SStephen Smalley 			iput(inode);
3024570bc1c2SStephen Smalley 			return error;
3025570bc1c2SStephen Smalley 		}
3026570bc1c2SStephen Smalley 		error = 0;
3027570bc1c2SStephen Smalley 	}
3028570bc1c2SStephen Smalley 
30291da177e4SLinus Torvalds 	inode->i_size = len-1;
303069f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
30313ed47db3SAl Viro 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
30323ed47db3SAl Viro 		if (!inode->i_link) {
303369f07ec9SHugh Dickins 			iput(inode);
303469f07ec9SHugh Dickins 			return -ENOMEM;
303569f07ec9SHugh Dickins 		}
303669f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
30371da177e4SLinus Torvalds 	} else {
3038e8ecde25SAl Viro 		inode_nohighmem(inode);
30399e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
30401da177e4SLinus Torvalds 		if (error) {
30411da177e4SLinus Torvalds 			iput(inode);
30421da177e4SLinus Torvalds 			return error;
30431da177e4SLinus Torvalds 		}
304414fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
30451da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
304621fc61c7SAl Viro 		memcpy(page_address(page), symname, len);
3047ec9516fbSHugh Dickins 		SetPageUptodate(page);
30481da177e4SLinus Torvalds 		set_page_dirty(page);
30496746aff7SWu Fengguang 		unlock_page(page);
305009cbfeafSKirill A. Shutemov 		put_page(page);
30511da177e4SLinus Torvalds 	}
30521da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
3053078cd827SDeepa Dinamani 	dir->i_ctime = dir->i_mtime = current_time(dir);
30541da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
30551da177e4SLinus Torvalds 	dget(dentry);
30561da177e4SLinus Torvalds 	return 0;
30571da177e4SLinus Torvalds }
30581da177e4SLinus Torvalds 
3059fceef393SAl Viro static void shmem_put_link(void *arg)
3060fceef393SAl Viro {
3061fceef393SAl Viro 	mark_page_accessed(arg);
3062fceef393SAl Viro 	put_page(arg);
3063fceef393SAl Viro }
3064fceef393SAl Viro 
30656b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry,
3066fceef393SAl Viro 				  struct inode *inode,
3067fceef393SAl Viro 				  struct delayed_call *done)
30681da177e4SLinus Torvalds {
30691da177e4SLinus Torvalds 	struct page *page = NULL;
30706b255391SAl Viro 	int error;
30716a6c9904SAl Viro 	if (!dentry) {
30726a6c9904SAl Viro 		page = find_get_page(inode->i_mapping, 0);
30736a6c9904SAl Viro 		if (!page)
30746b255391SAl Viro 			return ERR_PTR(-ECHILD);
30756a6c9904SAl Viro 		if (!PageUptodate(page)) {
30766a6c9904SAl Viro 			put_page(page);
30776a6c9904SAl Viro 			return ERR_PTR(-ECHILD);
30786a6c9904SAl Viro 		}
30796a6c9904SAl Viro 	} else {
30809e18eb29SAndres Lagar-Cavilla 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3081680baacbSAl Viro 		if (error)
3082680baacbSAl Viro 			return ERR_PTR(error);
3083d3602444SHugh Dickins 		unlock_page(page);
30841da177e4SLinus Torvalds 	}
3085fceef393SAl Viro 	set_delayed_call(done, shmem_put_link, page);
308621fc61c7SAl Viro 	return page_address(page);
30871da177e4SLinus Torvalds }
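/*
 * Symlink storage, as set up by shmem_symlink() above: targets of up
 * to SHORT_SYMLINK_LEN bytes live in a kmemdup'd inode->i_link and are
 * served by simple_get_link(); longer targets occupy page 0 of the
 * mapping, which shmem_get_link() looks up (without blocking in RCU
 * walk, when dentry is NULL) and releases via the delayed
 * shmem_put_link().
 */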
30881da177e4SLinus Torvalds 
3089b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3090b09e0fa4SEric Paris /*
3091b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
3092b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
3093b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
3094b09e0fa4SEric Paris  * filesystem level, though.
3095b09e0fa4SEric Paris  */
3096b09e0fa4SEric Paris 
30976d9d88d0SJarkko Sakkinen /*
30986d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
30996d9d88d0SJarkko Sakkinen  */
31006d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
31016d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
31026d9d88d0SJarkko Sakkinen 			    void *fs_info)
31036d9d88d0SJarkko Sakkinen {
31046d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
31056d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
310638f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
31076d9d88d0SJarkko Sakkinen 	size_t len;
31086d9d88d0SJarkko Sakkinen 
31096d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
311038f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
31116d9d88d0SJarkko Sakkinen 		if (!new_xattr)
31126d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31136d9d88d0SJarkko Sakkinen 
31146d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
31156d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
31166d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
31176d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
31186d9d88d0SJarkko Sakkinen 			kfree(new_xattr);
31196d9d88d0SJarkko Sakkinen 			return -ENOMEM;
31206d9d88d0SJarkko Sakkinen 		}
31216d9d88d0SJarkko Sakkinen 
31226d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
31236d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
31246d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
31256d9d88d0SJarkko Sakkinen 		       xattr->name, len);
31266d9d88d0SJarkko Sakkinen 
312738f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
31286d9d88d0SJarkko Sakkinen 	}
31296d9d88d0SJarkko Sakkinen 
31306d9d88d0SJarkko Sakkinen 	return 0;
31316d9d88d0SJarkko Sakkinen }
31326d9d88d0SJarkko Sakkinen 
3133aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3134b296821aSAl Viro 				   struct dentry *unused, struct inode *inode,
3135b296821aSAl Viro 				   const char *name, void *buffer, size_t size)
3136aa7c5241SAndreas Gruenbacher {
3137b296821aSAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3138aa7c5241SAndreas Gruenbacher 
3139aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3140aa7c5241SAndreas Gruenbacher 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3141aa7c5241SAndreas Gruenbacher }
3142aa7c5241SAndreas Gruenbacher 
3143aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler,
314459301226SAl Viro 				   struct dentry *unused, struct inode *inode,
314559301226SAl Viro 				   const char *name, const void *value,
314659301226SAl Viro 				   size_t size, int flags)
3147aa7c5241SAndreas Gruenbacher {
314859301226SAl Viro 	struct shmem_inode_info *info = SHMEM_I(inode);
3149aa7c5241SAndreas Gruenbacher 
3150aa7c5241SAndreas Gruenbacher 	name = xattr_full_name(handler, name);
3151aa7c5241SAndreas Gruenbacher 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
3152aa7c5241SAndreas Gruenbacher }
3153aa7c5241SAndreas Gruenbacher 
3154aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = {
3155aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_SECURITY_PREFIX,
3156aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3157aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3158aa7c5241SAndreas Gruenbacher };
3159aa7c5241SAndreas Gruenbacher 
3160aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = {
3161aa7c5241SAndreas Gruenbacher 	.prefix = XATTR_TRUSTED_PREFIX,
3162aa7c5241SAndreas Gruenbacher 	.get = shmem_xattr_handler_get,
3163aa7c5241SAndreas Gruenbacher 	.set = shmem_xattr_handler_set,
3164aa7c5241SAndreas Gruenbacher };
3165aa7c5241SAndreas Gruenbacher 
3166b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
3167b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
3168feda821eSChristoph Hellwig 	&posix_acl_access_xattr_handler,
3169feda821eSChristoph Hellwig 	&posix_acl_default_xattr_handler,
3170b09e0fa4SEric Paris #endif
3171aa7c5241SAndreas Gruenbacher 	&shmem_security_xattr_handler,
3172aa7c5241SAndreas Gruenbacher 	&shmem_trusted_xattr_handler,
3173b09e0fa4SEric Paris 	NULL
3174b09e0fa4SEric Paris };
3175b09e0fa4SEric Paris 
3176b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3177b09e0fa4SEric Paris {
317875c3cfa8SDavid Howells 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3179786534b9SAndreas Gruenbacher 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3180b09e0fa4SEric Paris }
3181b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
3182b09e0fa4SEric Paris 
318369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
31846b255391SAl Viro 	.get_link	= simple_get_link,
3185b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3186b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3187b09e0fa4SEric Paris #endif
31881da177e4SLinus Torvalds };
31891da177e4SLinus Torvalds 
319092e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
31916b255391SAl Viro 	.get_link	= shmem_get_link,
3192b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3193b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
319439f0247dSAndreas Gruenbacher #endif
3195b09e0fa4SEric Paris };
319639f0247dSAndreas Gruenbacher 
319791828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
319891828a40SDavid M. Grimes {
319991828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
320091828a40SDavid M. Grimes }
320191828a40SDavid M. Grimes 
320291828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
320391828a40SDavid M. Grimes {
320491828a40SDavid M. Grimes 	__u32 *fh = vfh;
320591828a40SDavid M. Grimes 	__u64 inum = fh[2];
320691828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
320791828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
320891828a40SDavid M. Grimes }
320991828a40SDavid M. Grimes 
321012ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */
321112ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode)
321212ba780dSAmir Goldstein {
321312ba780dSAmir Goldstein 	struct dentry *alias = d_find_alias(inode);
321412ba780dSAmir Goldstein 
321512ba780dSAmir Goldstein 	return alias ?: d_find_any_alias(inode);
321612ba780dSAmir Goldstein }
321712ba780dSAmir Goldstein 
3219480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3220480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
322191828a40SDavid M. Grimes {
322291828a40SDavid M. Grimes 	struct inode *inode;
3223480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
322435c2a7f4SHugh Dickins 	u64 inum;
322591828a40SDavid M. Grimes 
3226480b116cSChristoph Hellwig 	if (fh_len < 3)
3227480b116cSChristoph Hellwig 		return NULL;
3228480b116cSChristoph Hellwig 
322935c2a7f4SHugh Dickins 	inum = fid->raw[2];
323035c2a7f4SHugh Dickins 	inum = (inum << 32) | fid->raw[1];
323135c2a7f4SHugh Dickins 
3232480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3233480b116cSChristoph Hellwig 			shmem_match, fid->raw);
323491828a40SDavid M. Grimes 	if (inode) {
323512ba780dSAmir Goldstein 		dentry = shmem_find_alias(inode);
323691828a40SDavid M. Grimes 		iput(inode);
323791828a40SDavid M. Grimes 	}
323891828a40SDavid M. Grimes 
3239480b116cSChristoph Hellwig 	return dentry;
324091828a40SDavid M. Grimes }
324191828a40SDavid M. Grimes 
3242b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3243b0b0382bSAl Viro 				struct inode *parent)
324491828a40SDavid M. Grimes {
32455fe0c237SAneesh Kumar K.V 	if (*len < 3) {
32465fe0c237SAneesh Kumar K.V 		*len = 3;
324794e07a75SNamjae Jeon 		return FILEID_INVALID;
32485fe0c237SAneesh Kumar K.V 	}
324991828a40SDavid M. Grimes 
32501d3382cbSAl Viro 	if (inode_unhashed(inode)) {
325191828a40SDavid M. Grimes 		/*
325191828a40SDavid M. Grimes 		 * Unfortunately insert_inode_hash is not idempotent,
325291828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
325391828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
325491828a40SDavid M. Grimes 		 * to do it once.
325591828a40SDavid M. Grimes 		 */
325691828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
325791828a40SDavid M. Grimes 		spin_lock(&lock);
32581d3382cbSAl Viro 		if (inode_unhashed(inode))
325991828a40SDavid M. Grimes 			__insert_inode_hash(inode,
326091828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
326191828a40SDavid M. Grimes 		spin_unlock(&lock);
326291828a40SDavid M. Grimes 	}
326391828a40SDavid M. Grimes 
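	/* 96-bit handle: fh[0] = i_generation, fh[1]/fh[2] = i_ino low/high */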
326491828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
326591828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
326691828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
326791828a40SDavid M. Grimes 
326891828a40SDavid M. Grimes 	*len = 3;
326991828a40SDavid M. Grimes 	return 1;
327091828a40SDavid M. Grimes }
327191828a40SDavid M. Grimes 
327239655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
327391828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
327491828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
3275480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
327691828a40SDavid M. Grimes };
327791828a40SDavid M. Grimes 
3278680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3279680d794bSakpm@linux-foundation.org 			       bool remount)
32801da177e4SLinus Torvalds {
32811da177e4SLinus Torvalds 	char *this_char, *value, *rest;
328249cd0a5cSGreg Thelen 	struct mempolicy *mpol = NULL;
32838751e039SEric W. Biederman 	uid_t uid;
32848751e039SEric W. Biederman 	gid_t gid;
32851da177e4SLinus Torvalds 
3286b00dc3adSHugh Dickins 	while (options != NULL) {
3287b00dc3adSHugh Dickins 		this_char = options;
3288b00dc3adSHugh Dickins 		for (;;) {
3289b00dc3adSHugh Dickins 			/*
3290b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
3291b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
3292b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
3293b00dc3adSHugh Dickins 			 */
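			/*
			 * Illustrative: in "size=1g,mpol=bind:0,2", the comma
			 * inside the nodelist "0,2" must not terminate the
			 * mpol option; the isdigit() check below handles this.
			 */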
3294b00dc3adSHugh Dickins 			options = strchr(options, ',');
3295b00dc3adSHugh Dickins 			if (options == NULL)
3296b00dc3adSHugh Dickins 				break;
3297b00dc3adSHugh Dickins 			options++;
3298b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
3299b00dc3adSHugh Dickins 				options[-1] = '\0';
3300b00dc3adSHugh Dickins 				break;
3301b00dc3adSHugh Dickins 			}
3302b00dc3adSHugh Dickins 		}
33031da177e4SLinus Torvalds 		if (!*this_char)
33041da177e4SLinus Torvalds 			continue;
33051da177e4SLinus Torvalds 		if ((value = strchr(this_char,'=')) != NULL) {
33061da177e4SLinus Torvalds 			*value++ = 0;
33071da177e4SLinus Torvalds 		} else {
33081170532bSJoe Perches 			pr_err("tmpfs: No value for mount option '%s'\n",
33091da177e4SLinus Torvalds 			       this_char);
331049cd0a5cSGreg Thelen 			goto error;
33111da177e4SLinus Torvalds 		}
33121da177e4SLinus Torvalds 
33131da177e4SLinus Torvalds 		if (!strcmp(this_char,"size")) {
33141da177e4SLinus Torvalds 			unsigned long long size;
33151da177e4SLinus Torvalds 			size = memparse(value,&rest);
33161da177e4SLinus Torvalds 			if (*rest == '%') {
33171da177e4SLinus Torvalds 				size <<= PAGE_SHIFT;
33181da177e4SLinus Torvalds 				size *= totalram_pages;
33191da177e4SLinus Torvalds 				do_div(size, 100);
33201da177e4SLinus Torvalds 				rest++;
33211da177e4SLinus Torvalds 			}
33221da177e4SLinus Torvalds 			if (*rest)
33231da177e4SLinus Torvalds 				goto bad_val;
3324680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks =
332509cbfeafSKirill A. Shutemov 				DIV_ROUND_UP(size, PAGE_SIZE);
33261da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_blocks")) {
3327680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks = memparse(value, &rest);
33281da177e4SLinus Torvalds 			if (*rest)
33291da177e4SLinus Torvalds 				goto bad_val;
33301da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_inodes")) {
3331680d794bSakpm@linux-foundation.org 			sbinfo->max_inodes = memparse(value, &rest);
33321da177e4SLinus Torvalds 			if (*rest)
33331da177e4SLinus Torvalds 				goto bad_val;
33341da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"mode")) {
3335680d794bSakpm@linux-foundation.org 			if (remount)
33361da177e4SLinus Torvalds 				continue;
3337680d794bSakpm@linux-foundation.org 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
33381da177e4SLinus Torvalds 			if (*rest)
33391da177e4SLinus Torvalds 				goto bad_val;
33401da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"uid")) {
3341680d794bSakpm@linux-foundation.org 			if (remount)
33421da177e4SLinus Torvalds 				continue;
33438751e039SEric W. Biederman 			uid = simple_strtoul(value, &rest, 0);
33441da177e4SLinus Torvalds 			if (*rest)
33451da177e4SLinus Torvalds 				goto bad_val;
33468751e039SEric W. Biederman 			sbinfo->uid = make_kuid(current_user_ns(), uid);
33478751e039SEric W. Biederman 			if (!uid_valid(sbinfo->uid))
33488751e039SEric W. Biederman 				goto bad_val;
33491da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"gid")) {
3350680d794bSakpm@linux-foundation.org 			if (remount)
33511da177e4SLinus Torvalds 				continue;
33528751e039SEric W. Biederman 			gid = simple_strtoul(value, &rest, 0);
33531da177e4SLinus Torvalds 			if (*rest)
33541da177e4SLinus Torvalds 				goto bad_val;
33558751e039SEric W. Biederman 			sbinfo->gid = make_kgid(current_user_ns(), gid);
33568751e039SEric W. Biederman 			if (!gid_valid(sbinfo->gid))
33578751e039SEric W. Biederman 				goto bad_val;
3358e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
33595a6e75f8SKirill A. Shutemov 		} else if (!strcmp(this_char, "huge")) {
33605a6e75f8SKirill A. Shutemov 			int huge;
33615a6e75f8SKirill A. Shutemov 			huge = shmem_parse_huge(value);
33625a6e75f8SKirill A. Shutemov 			if (huge < 0)
33635a6e75f8SKirill A. Shutemov 				goto bad_val;
33645a6e75f8SKirill A. Shutemov 			if (!has_transparent_hugepage() &&
33655a6e75f8SKirill A. Shutemov 					huge != SHMEM_HUGE_NEVER)
33665a6e75f8SKirill A. Shutemov 				goto bad_val;
33675a6e75f8SKirill A. Shutemov 			sbinfo->huge = huge;
33685a6e75f8SKirill A. Shutemov #endif
33695a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA
33707339ff83SRobin Holt 		} else if (!strcmp(this_char,"mpol")) {
337149cd0a5cSGreg Thelen 			mpol_put(mpol);
337249cd0a5cSGreg Thelen 			mpol = NULL;
337349cd0a5cSGreg Thelen 			if (mpol_parse_str(value, &mpol))
33747339ff83SRobin Holt 				goto bad_val;
33755a6e75f8SKirill A. Shutemov #endif
33761da177e4SLinus Torvalds 		} else {
33771170532bSJoe Perches 			pr_err("tmpfs: Bad mount option %s\n", this_char);
337849cd0a5cSGreg Thelen 			goto error;
33791da177e4SLinus Torvalds 		}
33801da177e4SLinus Torvalds 	}
338149cd0a5cSGreg Thelen 	sbinfo->mpol = mpol;
33821da177e4SLinus Torvalds 	return 0;
33831da177e4SLinus Torvalds 
33841da177e4SLinus Torvalds bad_val:
33851170532bSJoe Perches 	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
33861da177e4SLinus Torvalds 	       value, this_char);
338749cd0a5cSGreg Thelen error:
338849cd0a5cSGreg Thelen 	mpol_put(mpol);
33891da177e4SLinus Torvalds 	return 1;
33901da177e4SLinus Torvalds 
33911da177e4SLinus Torvalds }
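
/*
 * Illustrative example (not from this file) of the options parsed above,
 * assuming CONFIG_TRANSPARENT_HUGE_PAGECACHE for "huge=":
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1000000,mode=1777,huge=within_size tmpfs /mnt
 */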
33921da177e4SLinus Torvalds 
33931da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
33941da177e4SLinus Torvalds {
33951da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3396680d794bSakpm@linux-foundation.org 	struct shmem_sb_info config = *sbinfo;
33970edd73b3SHugh Dickins 	unsigned long inodes;
33980edd73b3SHugh Dickins 	int error = -EINVAL;
33991da177e4SLinus Torvalds 
34005f00110fSGreg Thelen 	config.mpol = NULL;
3401680d794bSakpm@linux-foundation.org 	if (shmem_parse_options(data, &config, true))
34020edd73b3SHugh Dickins 		return error;
34030edd73b3SHugh Dickins 
34040edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
34050edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
34067e496299STim Chen 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
34070edd73b3SHugh Dickins 		goto out;
3408680d794bSakpm@linux-foundation.org 	if (config.max_inodes < inodes)
34090edd73b3SHugh Dickins 		goto out;
34100edd73b3SHugh Dickins 	/*
341154af6042SHugh Dickins 	 * Those tests disallow limited->unlimited while any are in use;
34120edd73b3SHugh Dickins 	 * but we must separately disallow unlimited->limited, because
34130edd73b3SHugh Dickins 	 * in that case we have no record of how much is already in use.
34140edd73b3SHugh Dickins 	 */
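	/*
	 * e.g. an instance mounted with size=0 (unlimited) has never
	 * accounted used_blocks, so "mount -o remount,size=1g" is refused.
	 */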
3415680d794bSakpm@linux-foundation.org 	if (config.max_blocks && !sbinfo->max_blocks)
34160edd73b3SHugh Dickins 		goto out;
3417680d794bSakpm@linux-foundation.org 	if (config.max_inodes && !sbinfo->max_inodes)
34180edd73b3SHugh Dickins 		goto out;
34190edd73b3SHugh Dickins 
34200edd73b3SHugh Dickins 	error = 0;
34215a6e75f8SKirill A. Shutemov 	sbinfo->huge = config.huge;
3422680d794bSakpm@linux-foundation.org 	sbinfo->max_blocks  = config.max_blocks;
3423680d794bSakpm@linux-foundation.org 	sbinfo->max_inodes  = config.max_inodes;
3424680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = config.max_inodes - inodes;
342571fe804bSLee Schermerhorn 
34265f00110fSGreg Thelen 	/*
34275f00110fSGreg Thelen 	 * Preserve previous mempolicy unless mpol remount option was specified.
34285f00110fSGreg Thelen 	 */
34295f00110fSGreg Thelen 	if (config.mpol) {
343071fe804bSLee Schermerhorn 		mpol_put(sbinfo->mpol);
343171fe804bSLee Schermerhorn 		sbinfo->mpol = config.mpol;	/* transfers initial ref */
34325f00110fSGreg Thelen 	}
34330edd73b3SHugh Dickins out:
34340edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
34350edd73b3SHugh Dickins 	return error;
34361da177e4SLinus Torvalds }
3437680d794bSakpm@linux-foundation.org 
343834c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3439680d794bSakpm@linux-foundation.org {
344034c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3441680d794bSakpm@linux-foundation.org 
3442680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3443680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
344409cbfeafSKirill A. Shutemov 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3445680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3446680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
34470825a6f9SJoe Perches 	if (sbinfo->mode != (0777 | S_ISVTX))
344809208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
34498751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
34508751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
34518751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
34528751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
34538751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
34548751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3455e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
34565a6e75f8SKirill A. Shutemov 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
34575a6e75f8SKirill A. Shutemov 	if (sbinfo->huge)
34585a6e75f8SKirill A. Shutemov 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
34595a6e75f8SKirill A. Shutemov #endif
346071fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
3461680d794bSakpm@linux-foundation.org 	return 0;
3462680d794bSakpm@linux-foundation.org }
34639183df25SDavid Herrmann 
3464680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
34651da177e4SLinus Torvalds 
34661da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
34671da177e4SLinus Torvalds {
3468602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3469602586a8SHugh Dickins 
3470602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
347149cd0a5cSGreg Thelen 	mpol_put(sbinfo->mpol);
3472602586a8SHugh Dickins 	kfree(sbinfo);
34731da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
34741da177e4SLinus Torvalds }
34751da177e4SLinus Torvalds 
34762b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent)
34771da177e4SLinus Torvalds {
34781da177e4SLinus Torvalds 	struct inode *inode;
34790edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
3480680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
3481680d794bSakpm@linux-foundation.org 
3482680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3483425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3484680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
3485680d794bSakpm@linux-foundation.org 	if (!sbinfo)
3486680d794bSakpm@linux-foundation.org 		return -ENOMEM;
3487680d794bSakpm@linux-foundation.org 
34880825a6f9SJoe Perches 	sbinfo->mode = 0777 | S_ISVTX;
348976aac0e9SDavid Howells 	sbinfo->uid = current_fsuid();
349076aac0e9SDavid Howells 	sbinfo->gid = current_fsgid();
3491680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
34921da177e4SLinus Torvalds 
34930edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
34941da177e4SLinus Torvalds 	/*
34951da177e4SLinus Torvalds 	 * By default we only allow half of the physical ram per
34961da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
34971da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
34981da177e4SLinus Torvalds 	 */
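	/* e.g. a plain "mount -t tmpfs tmpfs /mnt" on an 8 GiB machine gets size=4g */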
34991751e8a6SLinus Torvalds 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3500680d794bSakpm@linux-foundation.org 		sbinfo->max_blocks = shmem_default_max_blocks();
3501680d794bSakpm@linux-foundation.org 		sbinfo->max_inodes = shmem_default_max_inodes();
3502680d794bSakpm@linux-foundation.org 		if (shmem_parse_options(data, sbinfo, false)) {
3503680d794bSakpm@linux-foundation.org 			err = -EINVAL;
3504680d794bSakpm@linux-foundation.org 			goto failed;
3505680d794bSakpm@linux-foundation.org 		}
3506ca4e0519SAl Viro 	} else {
35071751e8a6SLinus Torvalds 		sb->s_flags |= SB_NOUSER;
35081da177e4SLinus Torvalds 	}
350991828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
35101751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOSEC;
35110edd73b3SHugh Dickins #else
35121751e8a6SLinus Torvalds 	sb->s_flags |= SB_NOUSER;
35130edd73b3SHugh Dickins #endif
35141da177e4SLinus Torvalds 
35151da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
3516908c7f19STejun Heo 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3517602586a8SHugh Dickins 		goto failed;
3518680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = sbinfo->max_inodes;
3519779750d2SKirill A. Shutemov 	spin_lock_init(&sbinfo->shrinklist_lock);
3520779750d2SKirill A. Shutemov 	INIT_LIST_HEAD(&sbinfo->shrinklist);
35211da177e4SLinus Torvalds 
3522285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
352309cbfeafSKirill A. Shutemov 	sb->s_blocksize = PAGE_SIZE;
352409cbfeafSKirill A. Shutemov 	sb->s_blocksize_bits = PAGE_SHIFT;
35251da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
35261da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
3527cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
3528b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
352939f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
3530b09e0fa4SEric Paris #endif
3531b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
35321751e8a6SLinus Torvalds 	sb->s_flags |= SB_POSIXACL;
353339f0247dSAndreas Gruenbacher #endif
35342b4db796SAmir Goldstein 	uuid_gen(&sb->s_uuid);
35350edd73b3SHugh Dickins 
3536454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
35371da177e4SLinus Torvalds 	if (!inode)
35381da177e4SLinus Torvalds 		goto failed;
3539680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
3540680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
3541318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
3542318ceed0SAl Viro 	if (!sb->s_root)
354348fde701SAl Viro 		goto failed;
35441da177e4SLinus Torvalds 	return 0;
35451da177e4SLinus Torvalds 
35461da177e4SLinus Torvalds failed:
35471da177e4SLinus Torvalds 	shmem_put_super(sb);
35481da177e4SLinus Torvalds 	return err;
35491da177e4SLinus Torvalds }
35501da177e4SLinus Torvalds 
3551fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
35521da177e4SLinus Torvalds 
35531da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
35541da177e4SLinus Torvalds {
355541ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
355641ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
355741ffe5d5SHugh Dickins 	if (!info)
35581da177e4SLinus Torvalds 		return NULL;
355941ffe5d5SHugh Dickins 	return &info->vfs_inode;
35601da177e4SLinus Torvalds }
35611da177e4SLinus Torvalds 
356241ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head)
3563fa0d7e3dSNick Piggin {
3564fa0d7e3dSNick Piggin 	struct inode *inode = container_of(head, struct inode, i_rcu);
356584e710daSAl Viro 	if (S_ISLNK(inode->i_mode))
35663ed47db3SAl Viro 		kfree(inode->i_link);
3567fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3568fa0d7e3dSNick Piggin }
3569fa0d7e3dSNick Piggin 
35701da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
35711da177e4SLinus Torvalds {
357209208d15SAl Viro 	if (S_ISREG(inode->i_mode))
35731da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
357441ffe5d5SHugh Dickins 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
35751da177e4SLinus Torvalds }
35761da177e4SLinus Torvalds 
357741ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
35781da177e4SLinus Torvalds {
357941ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
358041ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
35811da177e4SLinus Torvalds }
35821da177e4SLinus Torvalds 
35839a8ec03eSweiping zhang static void shmem_init_inodecache(void)
35841da177e4SLinus Torvalds {
35851da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
35861da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
35875d097056SVladimir Davydov 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
35881da177e4SLinus Torvalds }
35891da177e4SLinus Torvalds 
359041ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
35911da177e4SLinus Torvalds {
35921a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
35931da177e4SLinus Torvalds }
35941da177e4SLinus Torvalds 
3595f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
35961da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
359776719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
35981da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3599800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
3600800d15a5SNick Piggin 	.write_end	= shmem_write_end,
36011da177e4SLinus Torvalds #endif
36021c93923cSAndrew Morton #ifdef CONFIG_MIGRATION
3603304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
36041c93923cSAndrew Morton #endif
3605aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
36061da177e4SLinus Torvalds };
36071da177e4SLinus Torvalds 
360815ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
36091da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
3610c01d5b30SHugh Dickins 	.get_unmapped_area = shmem_get_unmapped_area,
36111da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
3612220f2ac9SHugh Dickins 	.llseek		= shmem_file_llseek,
36132ba5bbedSAl Viro 	.read_iter	= shmem_file_read_iter,
36148174202bSAl Viro 	.write_iter	= generic_file_write_iter,
36151b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
361682c156f8SAl Viro 	.splice_read	= generic_file_splice_read,
3617f6cb85d0SAl Viro 	.splice_write	= iter_file_splice_write,
361883e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
36191da177e4SLinus Torvalds #endif
36201da177e4SLinus Torvalds };
36211da177e4SLinus Torvalds 
362292e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
362344a30220SYu Zhao 	.getattr	= shmem_getattr,
362494c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3625b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3626b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3627feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
3628b09e0fa4SEric Paris #endif
36291da177e4SLinus Torvalds };
36301da177e4SLinus Torvalds 
363192e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
36321da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
36331da177e4SLinus Torvalds 	.create		= shmem_create,
36341da177e4SLinus Torvalds 	.lookup		= simple_lookup,
36351da177e4SLinus Torvalds 	.link		= shmem_link,
36361da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
36371da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
36381da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
36391da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
36401da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
36412773bf00SMiklos Szeredi 	.rename		= shmem_rename2,
364260545d0dSAl Viro 	.tmpfile	= shmem_tmpfile,
36431da177e4SLinus Torvalds #endif
3644b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3645b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3646b09e0fa4SEric Paris #endif
364739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
364894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3649feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
365039f0247dSAndreas Gruenbacher #endif
365139f0247dSAndreas Gruenbacher };
365239f0247dSAndreas Gruenbacher 
365392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
3654b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
3655b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
3656b09e0fa4SEric Paris #endif
365739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
365894c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
3659feda821eSChristoph Hellwig 	.set_acl	= simple_set_acl,
366039f0247dSAndreas Gruenbacher #endif
36611da177e4SLinus Torvalds };
36621da177e4SLinus Torvalds 
3663759b9775SHugh Dickins static const struct super_operations shmem_ops = {
36641da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
36651da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
36661da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
36671da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
36681da177e4SLinus Torvalds 	.remount_fs	= shmem_remount_fs,
3669680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
36701da177e4SLinus Torvalds #endif
36711f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
36721da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
36731da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
3674779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3675779750d2SKirill A. Shutemov 	.nr_cached_objects	= shmem_unused_huge_count,
3676779750d2SKirill A. Shutemov 	.free_cached_objects	= shmem_unused_huge_scan,
3677779750d2SKirill A. Shutemov #endif
36781da177e4SLinus Torvalds };
36791da177e4SLinus Torvalds 
3680f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
368154cb8821SNick Piggin 	.fault		= shmem_fault,
3682d7c17551SNing Qu 	.map_pages	= filemap_map_pages,
36831da177e4SLinus Torvalds #ifdef CONFIG_NUMA
36841da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
36851da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
36861da177e4SLinus Torvalds #endif
36871da177e4SLinus Torvalds };
36881da177e4SLinus Torvalds 
36893c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type,
36903c26ff6eSAl Viro 	int flags, const char *dev_name, void *data)
36911da177e4SLinus Torvalds {
36923c26ff6eSAl Viro 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
36931da177e4SLinus Torvalds }
36941da177e4SLinus Torvalds 
369541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
36961da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
36971da177e4SLinus Torvalds 	.name		= "tmpfs",
36983c26ff6eSAl Viro 	.mount		= shmem_mount,
36991da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
37002b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
37011da177e4SLinus Torvalds };
37021da177e4SLinus Torvalds 
370341ffe5d5SHugh Dickins int __init shmem_init(void)
37041da177e4SLinus Torvalds {
37051da177e4SLinus Torvalds 	int error;
37061da177e4SLinus Torvalds 
370716203a7aSRob Landley 	/* If rootfs called this, don't re-init */
370816203a7aSRob Landley 	if (shmem_inode_cachep)
370916203a7aSRob Landley 		return 0;
371016203a7aSRob Landley 
37119a8ec03eSweiping zhang 	shmem_init_inodecache();
37121da177e4SLinus Torvalds 
371341ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
37141da177e4SLinus Torvalds 	if (error) {
37151170532bSJoe Perches 		pr_err("Could not register tmpfs\n");
37161da177e4SLinus Torvalds 		goto out2;
37171da177e4SLinus Torvalds 	}
371895dc112aSGreg Kroah-Hartman 
3719ca4e0519SAl Viro 	shm_mnt = kern_mount(&shmem_fs_type);
37201da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
37211da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
37221170532bSJoe Perches 		pr_err("Could not kern_mount tmpfs\n");
37231da177e4SLinus Torvalds 		goto out1;
37241da177e4SLinus Torvalds 	}
37255a6e75f8SKirill A. Shutemov 
3726e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3727435c0b87SKirill A. Shutemov 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
37285a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
37295a6e75f8SKirill A. Shutemov 	else
37305a6e75f8SKirill A. Shutemov 		shmem_huge = 0; /* just in case it was patched */
37315a6e75f8SKirill A. Shutemov #endif
37321da177e4SLinus Torvalds 	return 0;
37331da177e4SLinus Torvalds 
37341da177e4SLinus Torvalds out1:
373541ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
37361da177e4SLinus Torvalds out2:
373741ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
37381da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
37391da177e4SLinus Torvalds 	return error;
37401da177e4SLinus Torvalds }
3741853ac43aSMatt Mackall 
3742e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
37435a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj,
37445a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, char *buf)
37455a6e75f8SKirill A. Shutemov {
37465a6e75f8SKirill A. Shutemov 	int values[] = {
37475a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ALWAYS,
37485a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_WITHIN_SIZE,
37495a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_ADVISE,
37505a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_NEVER,
37515a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_DENY,
37525a6e75f8SKirill A. Shutemov 		SHMEM_HUGE_FORCE,
37535a6e75f8SKirill A. Shutemov 	};
37545a6e75f8SKirill A. Shutemov 	int i, count;
37555a6e75f8SKirill A. Shutemov 
37565a6e75f8SKirill A. Shutemov 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
37575a6e75f8SKirill A. Shutemov 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
37585a6e75f8SKirill A. Shutemov 
37595a6e75f8SKirill A. Shutemov 		count += sprintf(buf + count, fmt,
37605a6e75f8SKirill A. Shutemov 				shmem_format_huge(values[i]));
37615a6e75f8SKirill A. Shutemov 	}
37625a6e75f8SKirill A. Shutemov 	buf[count - 1] = '\n';
37635a6e75f8SKirill A. Shutemov 	return count;
37645a6e75f8SKirill A. Shutemov }
37655a6e75f8SKirill A. Shutemov 
37665a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj,
37675a6e75f8SKirill A. Shutemov 		struct kobj_attribute *attr, const char *buf, size_t count)
37685a6e75f8SKirill A. Shutemov {
37695a6e75f8SKirill A. Shutemov 	char tmp[16];
37705a6e75f8SKirill A. Shutemov 	int huge;
37715a6e75f8SKirill A. Shutemov 
37725a6e75f8SKirill A. Shutemov 	if (count + 1 > sizeof(tmp))
37735a6e75f8SKirill A. Shutemov 		return -EINVAL;
37745a6e75f8SKirill A. Shutemov 	memcpy(tmp, buf, count);
37755a6e75f8SKirill A. Shutemov 	tmp[count] = '\0';
37765a6e75f8SKirill A. Shutemov 	if (count && tmp[count - 1] == '\n')
37775a6e75f8SKirill A. Shutemov 		tmp[count - 1] = '\0';
37785a6e75f8SKirill A. Shutemov 
37795a6e75f8SKirill A. Shutemov 	huge = shmem_parse_huge(tmp);
37805a6e75f8SKirill A. Shutemov 	if (huge == -EINVAL)
37815a6e75f8SKirill A. Shutemov 		return -EINVAL;
37825a6e75f8SKirill A. Shutemov 	if (!has_transparent_hugepage() &&
37835a6e75f8SKirill A. Shutemov 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
37845a6e75f8SKirill A. Shutemov 		return -EINVAL;
37855a6e75f8SKirill A. Shutemov 
37865a6e75f8SKirill A. Shutemov 	shmem_huge = huge;
3787435c0b87SKirill A. Shutemov 	if (shmem_huge > SHMEM_HUGE_DENY)
37885a6e75f8SKirill A. Shutemov 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
37895a6e75f8SKirill A. Shutemov 	return count;
37905a6e75f8SKirill A. Shutemov }
37915a6e75f8SKirill A. Shutemov 
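/*
 * Registered (from mm/huge_memory.c) as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled; reading it shows,
 * e.g., "always within_size advise [never] deny force".
 */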
37925a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr =
37935a6e75f8SKirill A. Shutemov 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
37943b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3795f3f0e1d2SKirill A. Shutemov 
37963b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3797f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma)
3798f3f0e1d2SKirill A. Shutemov {
3799f3f0e1d2SKirill A. Shutemov 	struct inode *inode = file_inode(vma->vm_file);
3800f3f0e1d2SKirill A. Shutemov 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3801f3f0e1d2SKirill A. Shutemov 	loff_t i_size;
3802f3f0e1d2SKirill A. Shutemov 	pgoff_t off;
3803f3f0e1d2SKirill A. Shutemov 
3804f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_FORCE)
3805f3f0e1d2SKirill A. Shutemov 		return true;
3806f3f0e1d2SKirill A. Shutemov 	if (shmem_huge == SHMEM_HUGE_DENY)
3807f3f0e1d2SKirill A. Shutemov 		return false;
3808f3f0e1d2SKirill A. Shutemov 	switch (sbinfo->huge) {
3809f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
3810f3f0e1d2SKirill A. Shutemov 			return false;
3811f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ALWAYS:
3812f3f0e1d2SKirill A. Shutemov 			return true;
3813f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
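			/*
			 * Illustrative: the mapping's file offset, rounded up
			 * to a PMD boundary, must still lie within i_size;
			 * otherwise fall through to the ADVISE check.
			 */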
3814f3f0e1d2SKirill A. Shutemov 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3815f3f0e1d2SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
3816f3f0e1d2SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
3817f3f0e1d2SKirill A. Shutemov 					i_size >> PAGE_SHIFT >= off)
3818f3f0e1d2SKirill A. Shutemov 				return true;
3819c8402871SGustavo A. R. Silva 			/* fall through */
3820f3f0e1d2SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
3821f3f0e1d2SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
3822f3f0e1d2SKirill A. Shutemov 			return (vma->vm_flags & VM_HUGEPAGE);
3823f3f0e1d2SKirill A. Shutemov 		default:
3824f3f0e1d2SKirill A. Shutemov 			VM_BUG_ON(1);
3825f3f0e1d2SKirill A. Shutemov 			return false;
3826f3f0e1d2SKirill A. Shutemov 	}
3827f3f0e1d2SKirill A. Shutemov }
38283b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
38295a6e75f8SKirill A. Shutemov 
3830853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
3831853ac43aSMatt Mackall 
3832853ac43aSMatt Mackall /*
3833853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3834853ac43aSMatt Mackall  *
3835853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
3836853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
3837853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
3838853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
3839853ac43aSMatt Mackall  */
3840853ac43aSMatt Mackall 
384141ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
3842853ac43aSMatt Mackall 	.name		= "tmpfs",
38433c26ff6eSAl Viro 	.mount		= ramfs_mount,
3844853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
38452b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
3846853ac43aSMatt Mackall };
3847853ac43aSMatt Mackall 
384841ffe5d5SHugh Dickins int __init shmem_init(void)
3849853ac43aSMatt Mackall {
385041ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3851853ac43aSMatt Mackall 
385241ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
3853853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
3854853ac43aSMatt Mackall 
3855853ac43aSMatt Mackall 	return 0;
3856853ac43aSMatt Mackall }
3857853ac43aSMatt Mackall 
385841ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
3859853ac43aSMatt Mackall {
3860853ac43aSMatt Mackall 	return 0;
3861853ac43aSMatt Mackall }
3862853ac43aSMatt Mackall 
38633f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
38643f96b79aSHugh Dickins {
38653f96b79aSHugh Dickins 	return 0;
38663f96b79aSHugh Dickins }
38673f96b79aSHugh Dickins 
386824513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
386924513264SHugh Dickins {
387024513264SHugh Dickins }
387124513264SHugh Dickins 
3872c01d5b30SHugh Dickins #ifdef CONFIG_MMU
3873c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
3874c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
3875c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
3876c01d5b30SHugh Dickins {
3877c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
3878c01d5b30SHugh Dickins }
3879c01d5b30SHugh Dickins #endif
3880c01d5b30SHugh Dickins 
388141ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
388294c1e62dSHugh Dickins {
388341ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
388494c1e62dSHugh Dickins }
388594c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
388694c1e62dSHugh Dickins 
3887853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
38880b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
3889454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
38900b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
38910b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
3892853ac43aSMatt Mackall 
3893853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
3894853ac43aSMatt Mackall 
3895853ac43aSMatt Mackall /* common code */
38961da177e4SLinus Torvalds 
3897703321b6SMatthew Auld static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
3898c7277090SEric Paris 				       unsigned long flags, unsigned int i_flags)
38991da177e4SLinus Torvalds {
39001da177e4SLinus Torvalds 	struct inode *inode;
390193dec2daSAl Viro 	struct file *res;
39021da177e4SLinus Torvalds 
3903703321b6SMatthew Auld 	if (IS_ERR(mnt))
3904703321b6SMatthew Auld 		return ERR_CAST(mnt);
39051da177e4SLinus Torvalds 
3906285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
39071da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
39081da177e4SLinus Torvalds 
39091da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
39101da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
39111da177e4SLinus Torvalds 
391293dec2daSAl Viro 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
391393dec2daSAl Viro 				flags);
3914dac2d1f6SAl Viro 	if (unlikely(!inode)) {
3915dac2d1f6SAl Viro 		shmem_unacct_size(flags, size);
3916dac2d1f6SAl Viro 		return ERR_PTR(-ENOSPC);
3917dac2d1f6SAl Viro 	}
3918c7277090SEric Paris 	inode->i_flags |= i_flags;
39191da177e4SLinus Torvalds 	inode->i_size = size;
39206d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
392126567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
392293dec2daSAl Viro 	if (!IS_ERR(res))
392393dec2daSAl Viro 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
39244b42af81SAl Viro 				&shmem_file_operations);
39256b4d0b27SAl Viro 	if (IS_ERR(res))
392693dec2daSAl Viro 		iput(inode);
39276b4d0b27SAl Viro 	return res;
39281da177e4SLinus Torvalds }
3929c7277090SEric Paris 
3930c7277090SEric Paris /**
3931c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3932c7277090SEric Paris  * 	kernel internal.  There will be NO LSM permission checks against the
3933c7277090SEric Paris  * 	underlying inode.  So users of this interface must do LSM checks at a
3934e1832f29SStephen Smalley  *	higher layer.  The users are the big_key and shm implementations.  LSM
3935e1832f29SStephen Smalley  *	checks are provided at the key or shm level rather than at the inode.
3936c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3937c7277090SEric Paris  * @size: size to be set for the file
3938c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3939c7277090SEric Paris  */
3940c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3941c7277090SEric Paris {
3942703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
3943c7277090SEric Paris }
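
/*
 * Illustrative sketch (not from this file) of a kernel-internal caller:
 *
 *	file = shmem_kernel_file_setup("big_key", datalen, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * Such a caller then does its own LSM checks, as described above.
 */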
3944c7277090SEric Paris 
3945c7277090SEric Paris /**
3946c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
3947c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3948c7277090SEric Paris  * @size: size to be set for the file
3949c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3950c7277090SEric Paris  */
3951c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3952c7277090SEric Paris {
3953703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
3954c7277090SEric Paris }
3955395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
39561da177e4SLinus Torvalds 
395746711810SRandy Dunlap /**
3958703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
3959703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
3960703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps
3961703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3962703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3963703321b6SMatthew Auld  */
3964703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
3965703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
3966703321b6SMatthew Auld {
3967703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
3968703321b6SMatthew Auld }
3969703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
3970703321b6SMatthew Auld 
3971703321b6SMatthew Auld /**
39721da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
39731da177e4SLinus Torvalds  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
39741da177e4SLinus Torvalds  */
39751da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
39761da177e4SLinus Torvalds {
39771da177e4SLinus Torvalds 	struct file *file;
39781da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
39791da177e4SLinus Torvalds 
398066fc1303SHugh Dickins 	/*
398166fc1303SHugh Dickins 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
398266fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
398366fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
398466fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
398566fc1303SHugh Dickins 	 */
3986703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
39871da177e4SLinus Torvalds 	if (IS_ERR(file))
39881da177e4SLinus Torvalds 		return PTR_ERR(file);
39891da177e4SLinus Torvalds 
39901da177e4SLinus Torvalds 	if (vma->vm_file)
39911da177e4SLinus Torvalds 		fput(vma->vm_file);
39921da177e4SLinus Torvalds 	vma->vm_file = file;
39931da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
3994f3f0e1d2SKirill A. Shutemov 
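	/* register with khugepaged only if the vma holds a fully PMD-aligned extent */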
3995e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
3996f3f0e1d2SKirill A. Shutemov 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
3997f3f0e1d2SKirill A. Shutemov 			(vma->vm_end & HPAGE_PMD_MASK)) {
3998f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
3999f3f0e1d2SKirill A. Shutemov 	}
4000f3f0e1d2SKirill A. Shutemov 
40011da177e4SLinus Torvalds 	return 0;
40021da177e4SLinus Torvalds }
4003d9d90e5eSHugh Dickins 
4004d9d90e5eSHugh Dickins /**
4005d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4006d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
4007d9d90e5eSHugh Dickins  * @index:	the page index
4008d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
4009d9d90e5eSHugh Dickins  *
4010d9d90e5eSHugh Dickins  * This behaves as the tmpfs counterpart of read_cache_page_gfp(mapping,
4011d9d90e5eSHugh Dickins  * index, gfp), with any new page allocations done using the specified
4012d9d90e5eSHugh Dickins  * allocation flags. But read_cache_page_gfp() uses the ->readpage() method,
4013d9d90e5eSHugh Dickins  * which does not suit tmpfs, since tmpfs may have pages in swapcache and
4014d9d90e5eSHugh Dickins  * needs to find those for itself; drivers/gpu/drm's i915 and ttm rely on this.
4015d9d90e5eSHugh Dickins  *
401668da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
401768da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4018d9d90e5eSHugh Dickins  */
4019d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4020d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4021d9d90e5eSHugh Dickins {
402268da9f05SHugh Dickins #ifdef CONFIG_SHMEM
402368da9f05SHugh Dickins 	struct inode *inode = mapping->host;
40249276aad6SHugh Dickins 	struct page *page;
402568da9f05SHugh Dickins 	int error;
402668da9f05SHugh Dickins 
402768da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
40289e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4029cfda0526SMike Rapoport 				  gfp, NULL, NULL, NULL);
403068da9f05SHugh Dickins 	if (error)
403168da9f05SHugh Dickins 		page = ERR_PTR(error);
403268da9f05SHugh Dickins 	else
403368da9f05SHugh Dickins 		unlock_page(page);
403468da9f05SHugh Dickins 	return page;
403568da9f05SHugh Dickins #else
403668da9f05SHugh Dickins 	/*
403768da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
403868da9f05SHugh Dickins 	 */
4039d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
404068da9f05SHugh Dickins #endif
4041d9d90e5eSHugh Dickins }
4042d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
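
/*
 * Illustrative sketch (not from this file), in the i915 style noted above:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */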
4043