xref: /openbmc/linux/mm/shmem.c (revision 38f38657444d15e1a8574eae80ed3de9f501737a)
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
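/*
 * A quick worked example of the units above, assuming 4KB pages
 * (PAGE_CACHE_SIZE == PAGE_SIZE == 4096): BLOCKS_PER_PAGE is then 8
 * 512-byte blocks per page, and VM_ACCT(5000) rounds 5000 bytes up to
 * the next page boundary and yields 2, i.e. such an object is charged
 * as two pages against the overcommit limit.
 */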

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
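/*
 * Illustrative defaults (hypothetical machine): with 1GB of RAM, 4KB
 * pages and no highmem, totalram_pages is 262144, so a tmpfs mount
 * without explicit size/nr_inodes options defaults to
 * max_blocks == 131072 pages (512MB of data) and max_inodes == 131072.
 */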
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
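/*
 * Worked example of the calculation above (hypothetical numbers): with
 * info->alloced == 10, info->swapped == 2 and only 5 pages left in the
 * mapping because the mm reclaimed 3 clean hole pages behind our back,
 * freed == 10 - 2 - 5 == 3, so 3 pages are given back to the sb's
 * used_blocks counter and to the overcommit accounting.
 */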

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that.  However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}
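/*
 * For instance (hypothetical case): a swapcache page sitting in a high
 * zone (say ZONE_HIGHMEM), found to belong to a mapping whose gfp mask
 * only allows allocation from a lower zone (say ZONE_NORMAL, or
 * ZONE_DMA32 for the gma500 case above), has
 * page_zonenum(page) > gfp_zone(gfp), so shmem_replace_page() must copy
 * it into a page from a suitable zone before it enters the filecache.
 */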
988bde05d1cSHugh Dickins 
989bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp,
990bde05d1cSHugh Dickins 				struct shmem_inode_info *info, pgoff_t index)
991bde05d1cSHugh Dickins {
992bde05d1cSHugh Dickins 	struct page *oldpage, *newpage;
993bde05d1cSHugh Dickins 	struct address_space *swap_mapping;
994bde05d1cSHugh Dickins 	pgoff_t swap_index;
995bde05d1cSHugh Dickins 	int error;
996bde05d1cSHugh Dickins 
997bde05d1cSHugh Dickins 	oldpage = *pagep;
998bde05d1cSHugh Dickins 	swap_index = page_private(oldpage);
999bde05d1cSHugh Dickins 	swap_mapping = page_mapping(oldpage);
1000bde05d1cSHugh Dickins 
1001bde05d1cSHugh Dickins 	/*
1002bde05d1cSHugh Dickins 	 * We have arrived here because our zones are constrained, so don't
1003bde05d1cSHugh Dickins 	 * limit chance of success by further cpuset and node constraints.
1004bde05d1cSHugh Dickins 	 */
1005bde05d1cSHugh Dickins 	gfp &= ~GFP_CONSTRAINT_MASK;
1006bde05d1cSHugh Dickins 	newpage = shmem_alloc_page(gfp, info, index);
1007bde05d1cSHugh Dickins 	if (!newpage)
1008bde05d1cSHugh Dickins 		return -ENOMEM;
1009bde05d1cSHugh Dickins 
1010bde05d1cSHugh Dickins 	page_cache_get(newpage);
1011bde05d1cSHugh Dickins 	copy_highpage(newpage, oldpage);
10120142ef6cSHugh Dickins 	flush_dcache_page(newpage);
1013bde05d1cSHugh Dickins 
1014bde05d1cSHugh Dickins 	__set_page_locked(newpage);
1015bde05d1cSHugh Dickins 	SetPageUptodate(newpage);
1016bde05d1cSHugh Dickins 	SetPageSwapBacked(newpage);
1017bde05d1cSHugh Dickins 	set_page_private(newpage, swap_index);
1018bde05d1cSHugh Dickins 	SetPageSwapCache(newpage);
1019bde05d1cSHugh Dickins 
1020bde05d1cSHugh Dickins 	/*
1021bde05d1cSHugh Dickins 	 * Our caller will very soon move newpage out of swapcache, but it's
1022bde05d1cSHugh Dickins 	 * a nice clean interface for us to replace oldpage by newpage there.
1023bde05d1cSHugh Dickins 	 */
1024bde05d1cSHugh Dickins 	spin_lock_irq(&swap_mapping->tree_lock);
1025bde05d1cSHugh Dickins 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1026bde05d1cSHugh Dickins 								   newpage);
10270142ef6cSHugh Dickins 	if (!error) {
1028bde05d1cSHugh Dickins 		__inc_zone_page_state(newpage, NR_FILE_PAGES);
1029bde05d1cSHugh Dickins 		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
10300142ef6cSHugh Dickins 	}
1031bde05d1cSHugh Dickins 	spin_unlock_irq(&swap_mapping->tree_lock);
1032bde05d1cSHugh Dickins 
10330142ef6cSHugh Dickins 	if (unlikely(error)) {
10340142ef6cSHugh Dickins 		/*
10350142ef6cSHugh Dickins 		 * Is this possible?  I think not, now that our callers check
10360142ef6cSHugh Dickins 		 * both PageSwapCache and page_private after getting page lock;
10370142ef6cSHugh Dickins 		 * but be defensive.  Reverse old to newpage for clear and free.
10380142ef6cSHugh Dickins 		 */
10390142ef6cSHugh Dickins 		oldpage = newpage;
10400142ef6cSHugh Dickins 	} else {
1041bde05d1cSHugh Dickins 		mem_cgroup_replace_page_cache(oldpage, newpage);
1042bde05d1cSHugh Dickins 		lru_cache_add_anon(newpage);
10430142ef6cSHugh Dickins 		*pagep = newpage;
10440142ef6cSHugh Dickins 	}
1045bde05d1cSHugh Dickins 
1046bde05d1cSHugh Dickins 	ClearPageSwapCache(oldpage);
1047bde05d1cSHugh Dickins 	set_page_private(oldpage, 0);
1048bde05d1cSHugh Dickins 
1049bde05d1cSHugh Dickins 	unlock_page(oldpage);
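	/*
	 * Two references are dropped on oldpage: the swap cache's reference,
	 * now held by newpage thanks to the page_cache_get() above, and the
	 * lookup reference our caller passed in.  On the error path oldpage
	 * was pointed at newpage, so newpage's two references go instead.
	 */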
1050bde05d1cSHugh Dickins 	page_cache_release(oldpage);
1051bde05d1cSHugh Dickins 	page_cache_release(oldpage);
10520142ef6cSHugh Dickins 	return error;
1053bde05d1cSHugh Dickins }
1054bde05d1cSHugh Dickins 
1055bde05d1cSHugh Dickins /*
105668da9f05SHugh Dickins  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
10571da177e4SLinus Torvalds  *
10581da177e4SLinus Torvalds  * If we allocate a new one we do not mark it dirty. That's up to the
10591da177e4SLinus Torvalds  * vm. If we swap it in we mark it dirty since we also free the swap
10601da177e4SLinus Torvalds  * entry, because a page cannot live in both the swap and page cache.
10611da177e4SLinus Torvalds  */
106241ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
106368da9f05SHugh Dickins 	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
10641da177e4SLinus Torvalds {
10651da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
106654af6042SHugh Dickins 	struct shmem_inode_info *info;
10671da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo;
106827ab7006SHugh Dickins 	struct page *page;
10691da177e4SLinus Torvalds 	swp_entry_t swap;
10701da177e4SLinus Torvalds 	int error;
107154af6042SHugh Dickins 	int once = 0;
10721635f6a7SHugh Dickins 	int alloced = 0;
10731da177e4SLinus Torvalds 
107441ffe5d5SHugh Dickins 	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
10751da177e4SLinus Torvalds 		return -EFBIG;
10761da177e4SLinus Torvalds repeat:
107754af6042SHugh Dickins 	swap.val = 0;
107841ffe5d5SHugh Dickins 	page = find_lock_page(mapping, index);
107954af6042SHugh Dickins 	if (radix_tree_exceptional_entry(page)) {
108054af6042SHugh Dickins 		swap = radix_to_swp_entry(page);
108154af6042SHugh Dickins 		page = NULL;
108254af6042SHugh Dickins 	}
108354af6042SHugh Dickins 
10841635f6a7SHugh Dickins 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
108554af6042SHugh Dickins 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
108654af6042SHugh Dickins 		error = -EINVAL;
108754af6042SHugh Dickins 		goto failed;
108854af6042SHugh Dickins 	}
108954af6042SHugh Dickins 
10901635f6a7SHugh Dickins 	/* fallocated page? */
10911635f6a7SHugh Dickins 	if (page && !PageUptodate(page)) {
10921635f6a7SHugh Dickins 		if (sgp != SGP_READ)
10931635f6a7SHugh Dickins 			goto clear;
10941635f6a7SHugh Dickins 		unlock_page(page);
10951635f6a7SHugh Dickins 		page_cache_release(page);
10961635f6a7SHugh Dickins 		page = NULL;
10971635f6a7SHugh Dickins 	}
109854af6042SHugh Dickins 	if (page || (sgp == SGP_READ && !swap.val)) {
109954af6042SHugh Dickins 		*pagep = page;
110054af6042SHugh Dickins 		return 0;
110127ab7006SHugh Dickins 	}
110227ab7006SHugh Dickins 
1103b409f9fcSHugh Dickins 	/*
110454af6042SHugh Dickins 	 * Fast cache lookup did not find it:
110554af6042SHugh Dickins 	 * bring it back from swap or allocate.
1106b409f9fcSHugh Dickins 	 */
110754af6042SHugh Dickins 	info = SHMEM_I(inode);
110854af6042SHugh Dickins 	sbinfo = SHMEM_SB(inode->i_sb);
110927ab7006SHugh Dickins 
11101da177e4SLinus Torvalds 	if (swap.val) {
11111da177e4SLinus Torvalds 		/* Look it up and read it in.. */
111227ab7006SHugh Dickins 		page = lookup_swap_cache(swap);
111327ab7006SHugh Dickins 		if (!page) {
1114456f998eSYing Han 			/* here we actually do the io */
111568da9f05SHugh Dickins 			if (fault_type)
111668da9f05SHugh Dickins 				*fault_type |= VM_FAULT_MAJOR;
111741ffe5d5SHugh Dickins 			page = shmem_swapin(swap, gfp, info, index);
111827ab7006SHugh Dickins 			if (!page) {
11191da177e4SLinus Torvalds 				error = -ENOMEM;
112054af6042SHugh Dickins 				goto failed;
1121285b2c4fSHugh Dickins 			}
11221da177e4SLinus Torvalds 		}
11231da177e4SLinus Torvalds 
11241da177e4SLinus Torvalds 		/* We have to do this with page locked to prevent races */
112554af6042SHugh Dickins 		lock_page(page);
11260142ef6cSHugh Dickins 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
1127d1899228SHugh Dickins 		    !shmem_confirm_swap(mapping, index, swap)) {
1128bde05d1cSHugh Dickins 			error = -EEXIST;	/* try again */
1129d1899228SHugh Dickins 			goto unlock;
1130bde05d1cSHugh Dickins 		}
113127ab7006SHugh Dickins 		if (!PageUptodate(page)) {
11321da177e4SLinus Torvalds 			error = -EIO;
113354af6042SHugh Dickins 			goto failed;
113454af6042SHugh Dickins 		}
113554af6042SHugh Dickins 		wait_on_page_writeback(page);
113654af6042SHugh Dickins 
1137bde05d1cSHugh Dickins 		if (shmem_should_replace_page(page, gfp)) {
1138bde05d1cSHugh Dickins 			error = shmem_replace_page(&page, gfp, info, index);
1139bde05d1cSHugh Dickins 			if (error)
114054af6042SHugh Dickins 				goto failed;
11411da177e4SLinus Torvalds 		}
11421da177e4SLinus Torvalds 
1143aa3b1895SHugh Dickins 		error = mem_cgroup_cache_charge(page, current->mm,
1144aa3b1895SHugh Dickins 						gfp & GFP_RECLAIM_MASK);
1145d1899228SHugh Dickins 		if (!error) {
114654af6042SHugh Dickins 			error = shmem_add_to_page_cache(page, mapping, index,
114754af6042SHugh Dickins 						gfp, swp_to_radix_entry(swap));
1148d1899228SHugh Dickins 			/* We already confirmed swap, and make no allocation */
1149d1899228SHugh Dickins 			VM_BUG_ON(error);
1150d1899228SHugh Dickins 		}
115154af6042SHugh Dickins 		if (error)
115254af6042SHugh Dickins 			goto failed;
115354af6042SHugh Dickins 
115454af6042SHugh Dickins 		spin_lock(&info->lock);
115554af6042SHugh Dickins 		info->swapped--;
115654af6042SHugh Dickins 		shmem_recalc_inode(inode);
11571da177e4SLinus Torvalds 		spin_unlock(&info->lock);
115827ab7006SHugh Dickins 
115927ab7006SHugh Dickins 		delete_from_swap_cache(page);
116027ab7006SHugh Dickins 		set_page_dirty(page);
116127ab7006SHugh Dickins 		swap_free(swap);
116227ab7006SHugh Dickins 
116354af6042SHugh Dickins 	} else {
116454af6042SHugh Dickins 		if (shmem_acct_block(info->flags)) {
116554af6042SHugh Dickins 			error = -ENOSPC;
116654af6042SHugh Dickins 			goto failed;
11671da177e4SLinus Torvalds 		}
11680edd73b3SHugh Dickins 		if (sbinfo->max_blocks) {
1169fc5da22aSHugh Dickins 			if (percpu_counter_compare(&sbinfo->used_blocks,
117054af6042SHugh Dickins 						sbinfo->max_blocks) >= 0) {
117154af6042SHugh Dickins 				error = -ENOSPC;
117254af6042SHugh Dickins 				goto unacct;
117354af6042SHugh Dickins 			}
11747e496299STim Chen 			percpu_counter_inc(&sbinfo->used_blocks);
117559a16eadSHugh Dickins 		}
11761da177e4SLinus Torvalds 
117754af6042SHugh Dickins 		page = shmem_alloc_page(gfp, info, index);
117854af6042SHugh Dickins 		if (!page) {
117954af6042SHugh Dickins 			error = -ENOMEM;
118054af6042SHugh Dickins 			goto decused;
118154af6042SHugh Dickins 		}
118254af6042SHugh Dickins 
118354af6042SHugh Dickins 		SetPageSwapBacked(page);
118454af6042SHugh Dickins 		__set_page_locked(page);
1185aa3b1895SHugh Dickins 		error = mem_cgroup_cache_charge(page, current->mm,
1186aa3b1895SHugh Dickins 						gfp & GFP_RECLAIM_MASK);
118754af6042SHugh Dickins 		if (error)
118854af6042SHugh Dickins 			goto decused;
1189b065b432SHugh Dickins 		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
1190b065b432SHugh Dickins 		if (!error) {
1191b065b432SHugh Dickins 			error = shmem_add_to_page_cache(page, mapping, index,
1192b065b432SHugh Dickins 							gfp, NULL);
1193b065b432SHugh Dickins 			radix_tree_preload_end();
1194b065b432SHugh Dickins 		}
1195b065b432SHugh Dickins 		if (error) {
1196b065b432SHugh Dickins 			mem_cgroup_uncharge_cache_page(page);
1197b065b432SHugh Dickins 			goto decused;
1198b065b432SHugh Dickins 		}
119954af6042SHugh Dickins 		lru_cache_add_anon(page);
120054af6042SHugh Dickins 
120154af6042SHugh Dickins 		spin_lock(&info->lock);
12021da177e4SLinus Torvalds 		info->alloced++;
120354af6042SHugh Dickins 		inode->i_blocks += BLOCKS_PER_PAGE;
120454af6042SHugh Dickins 		shmem_recalc_inode(inode);
120559a16eadSHugh Dickins 		spin_unlock(&info->lock);
12061635f6a7SHugh Dickins 		alloced = true;
120754af6042SHugh Dickins 
1208ec9516fbSHugh Dickins 		/*
12091635f6a7SHugh Dickins 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
12101635f6a7SHugh Dickins 		 */
12111635f6a7SHugh Dickins 		if (sgp == SGP_FALLOC)
12121635f6a7SHugh Dickins 			sgp = SGP_WRITE;
12131635f6a7SHugh Dickins clear:
12141635f6a7SHugh Dickins 		/*
12151635f6a7SHugh Dickins 		 * Let SGP_WRITE caller clear ends if write does not fill page;
12161635f6a7SHugh Dickins 		 * but SGP_FALLOC on a page fallocated earlier must initialize
12171635f6a7SHugh Dickins 		 * it now, lest undo on failure cancel our earlier guarantee.
1218ec9516fbSHugh Dickins 		 */
1219ec9516fbSHugh Dickins 		if (sgp != SGP_WRITE) {
122027ab7006SHugh Dickins 			clear_highpage(page);
122127ab7006SHugh Dickins 			flush_dcache_page(page);
122227ab7006SHugh Dickins 			SetPageUptodate(page);
1223ec9516fbSHugh Dickins 		}
1224a0ee5ec5SHugh Dickins 		if (sgp == SGP_DIRTY)
122527ab7006SHugh Dickins 			set_page_dirty(page);
12261da177e4SLinus Torvalds 	}
1227bde05d1cSHugh Dickins 
122854af6042SHugh Dickins 	/* Perhaps the file has been truncated since we checked */
12291635f6a7SHugh Dickins 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
123054af6042SHugh Dickins 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
123154af6042SHugh Dickins 		error = -EINVAL;
12321635f6a7SHugh Dickins 		if (alloced)
123354af6042SHugh Dickins 			goto trunc;
12341635f6a7SHugh Dickins 		else
12351635f6a7SHugh Dickins 			goto failed;
1236ff36b801SShaohua Li 	}
123754af6042SHugh Dickins 	*pagep = page;
123854af6042SHugh Dickins 	return 0;
1239d00806b1SNick Piggin 
1240d0217ac0SNick Piggin 	/*
124154af6042SHugh Dickins 	 * Error recovery.
12421da177e4SLinus Torvalds 	 */
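	/*
	 * Unwind in reverse order of setup: trunc removes the page from the
	 * page cache and reverses the inode accounting, decused gives back
	 * the superblock's used_blocks, unacct drops the shmem_acct_block()
	 * charge; -EEXIST (and a first -ENOSPC, after shmem_recalc_inode)
	 * sends us back to repeat.
	 */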
124354af6042SHugh Dickins trunc:
12441635f6a7SHugh Dickins 	info = SHMEM_I(inode);
124554af6042SHugh Dickins 	ClearPageDirty(page);
124654af6042SHugh Dickins 	delete_from_page_cache(page);
124754af6042SHugh Dickins 	spin_lock(&info->lock);
124854af6042SHugh Dickins 	info->alloced--;
124954af6042SHugh Dickins 	inode->i_blocks -= BLOCKS_PER_PAGE;
12501da177e4SLinus Torvalds 	spin_unlock(&info->lock);
125154af6042SHugh Dickins decused:
12521635f6a7SHugh Dickins 	sbinfo = SHMEM_SB(inode->i_sb);
125354af6042SHugh Dickins 	if (sbinfo->max_blocks)
125454af6042SHugh Dickins 		percpu_counter_add(&sbinfo->used_blocks, -1);
125554af6042SHugh Dickins unacct:
125654af6042SHugh Dickins 	shmem_unacct_blocks(info->flags, 1);
125754af6042SHugh Dickins failed:
1258d1899228SHugh Dickins 	if (swap.val && error != -EINVAL &&
1259d1899228SHugh Dickins 	    !shmem_confirm_swap(mapping, index, swap))
126054af6042SHugh Dickins 		error = -EEXIST;
1261d1899228SHugh Dickins unlock:
126227ab7006SHugh Dickins 	if (page) {
126354af6042SHugh Dickins 		unlock_page(page);
12641da177e4SLinus Torvalds 		page_cache_release(page);
126554af6042SHugh Dickins 	}
126654af6042SHugh Dickins 	if (error == -ENOSPC && !once++) {
126754af6042SHugh Dickins 		info = SHMEM_I(inode);
126854af6042SHugh Dickins 		spin_lock(&info->lock);
126954af6042SHugh Dickins 		shmem_recalc_inode(inode);
127054af6042SHugh Dickins 		spin_unlock(&info->lock);
12711da177e4SLinus Torvalds 		goto repeat;
1272d8dc74f2SAdrian Bunk 	}
1273d1899228SHugh Dickins 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
127454af6042SHugh Dickins 		goto repeat;
127554af6042SHugh Dickins 	return error;
12761da177e4SLinus Torvalds }
12771da177e4SLinus Torvalds 
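/*
 * shmem_fault() drives shmem_getpage() with SGP_CACHE, and passes &ret so
 * that a swapin which had to do real I/O comes back as VM_FAULT_MAJOR and
 * is counted as PGMAJFAULT below.
 */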
12781da177e4SLinus Torvalds static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12791da177e4SLinus Torvalds {
12801da177e4SLinus Torvalds 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
12811da177e4SLinus Torvalds 	int error;
128268da9f05SHugh Dickins 	int ret = VM_FAULT_LOCKED;
12831da177e4SLinus Torvalds 
12841da177e4SLinus Torvalds 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
12851da177e4SLinus Torvalds 	if (error)
12861da177e4SLinus Torvalds 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
128768da9f05SHugh Dickins 
1288456f998eSYing Han 	if (ret & VM_FAULT_MAJOR) {
1289456f998eSYing Han 		count_vm_event(PGMAJFAULT);
1290456f998eSYing Han 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1291456f998eSYing Han 	}
129268da9f05SHugh Dickins 	return ret;
12931da177e4SLinus Torvalds }
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds #ifdef CONFIG_NUMA
129641ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
12971da177e4SLinus Torvalds {
129841ffe5d5SHugh Dickins 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
129941ffe5d5SHugh Dickins 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
13001da177e4SLinus Torvalds }
13011da177e4SLinus Torvalds 
1302d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1303d8dc74f2SAdrian Bunk 					  unsigned long addr)
13041da177e4SLinus Torvalds {
130541ffe5d5SHugh Dickins 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
130641ffe5d5SHugh Dickins 	pgoff_t index;
13071da177e4SLinus Torvalds 
130841ffe5d5SHugh Dickins 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
130941ffe5d5SHugh Dickins 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
13101da177e4SLinus Torvalds }
13111da177e4SLinus Torvalds #endif
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user)
13141da177e4SLinus Torvalds {
1315d3ac7f89SJosef "Jeff" Sipek 	struct inode *inode = file->f_path.dentry->d_inode;
13161da177e4SLinus Torvalds 	struct shmem_inode_info *info = SHMEM_I(inode);
13171da177e4SLinus Torvalds 	int retval = -ENOMEM;
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	spin_lock(&info->lock);
13201da177e4SLinus Torvalds 	if (lock && !(info->flags & VM_LOCKED)) {
13211da177e4SLinus Torvalds 		if (!user_shm_lock(inode->i_size, user))
13221da177e4SLinus Torvalds 			goto out_nomem;
13231da177e4SLinus Torvalds 		info->flags |= VM_LOCKED;
132489e004eaSLee Schermerhorn 		mapping_set_unevictable(file->f_mapping);
13251da177e4SLinus Torvalds 	}
13261da177e4SLinus Torvalds 	if (!lock && (info->flags & VM_LOCKED) && user) {
13271da177e4SLinus Torvalds 		user_shm_unlock(inode->i_size, user);
13281da177e4SLinus Torvalds 		info->flags &= ~VM_LOCKED;
132989e004eaSLee Schermerhorn 		mapping_clear_unevictable(file->f_mapping);
13301da177e4SLinus Torvalds 	}
13311da177e4SLinus Torvalds 	retval = 0;
133289e004eaSLee Schermerhorn 
13331da177e4SLinus Torvalds out_nomem:
13341da177e4SLinus Torvalds 	spin_unlock(&info->lock);
13351da177e4SLinus Torvalds 	return retval;
13361da177e4SLinus Torvalds }
13371da177e4SLinus Torvalds 
13389b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
13391da177e4SLinus Torvalds {
13401da177e4SLinus Torvalds 	file_accessed(file);
13411da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
1342d0217ac0SNick Piggin 	vma->vm_flags |= VM_CAN_NONLINEAR;
13431da177e4SLinus Torvalds 	return 0;
13441da177e4SLinus Torvalds }
13451da177e4SLinus Torvalds 
1346454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
134709208d15SAl Viro 				     umode_t mode, dev_t dev, unsigned long flags)
13481da177e4SLinus Torvalds {
13491da177e4SLinus Torvalds 	struct inode *inode;
13501da177e4SLinus Torvalds 	struct shmem_inode_info *info;
13511da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
13521da177e4SLinus Torvalds 
13535b04c689SPavel Emelyanov 	if (shmem_reserve_inode(sb))
13541da177e4SLinus Torvalds 		return NULL;
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	inode = new_inode(sb);
13571da177e4SLinus Torvalds 	if (inode) {
135885fe4025SChristoph Hellwig 		inode->i_ino = get_next_ino();
1359454abafeSDmitry Monakhov 		inode_init_owner(inode, dir, mode);
13601da177e4SLinus Torvalds 		inode->i_blocks = 0;
13611da177e4SLinus Torvalds 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
13621da177e4SLinus Torvalds 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
136391828a40SDavid M. Grimes 		inode->i_generation = get_seconds();
13641da177e4SLinus Torvalds 		info = SHMEM_I(inode);
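		/*
		 * struct shmem_inode_info embeds the VFS inode as its last
		 * member, so this memset zeroes only the shmem-private
		 * fields in front of the freshly initialized inode.
		 */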
13651da177e4SLinus Torvalds 		memset(info, 0, (char *)inode - (char *)info);
13661da177e4SLinus Torvalds 		spin_lock_init(&info->lock);
13670b0a0806SHugh Dickins 		info->flags = flags & VM_NORESERVE;
13681da177e4SLinus Torvalds 		INIT_LIST_HEAD(&info->swaplist);
1369*38f38657SAristeu Rozanski 		simple_xattrs_init(&info->xattrs);
137072c04902SAl Viro 		cache_no_acl(inode);
13711da177e4SLinus Torvalds 
13721da177e4SLinus Torvalds 		switch (mode & S_IFMT) {
13731da177e4SLinus Torvalds 		default:
137439f0247dSAndreas Gruenbacher 			inode->i_op = &shmem_special_inode_operations;
13751da177e4SLinus Torvalds 			init_special_inode(inode, mode, dev);
13761da177e4SLinus Torvalds 			break;
13771da177e4SLinus Torvalds 		case S_IFREG:
137814fcc23fSHugh Dickins 			inode->i_mapping->a_ops = &shmem_aops;
13791da177e4SLinus Torvalds 			inode->i_op = &shmem_inode_operations;
13801da177e4SLinus Torvalds 			inode->i_fop = &shmem_file_operations;
138171fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy,
138271fe804bSLee Schermerhorn 						 shmem_get_sbmpol(sbinfo));
13831da177e4SLinus Torvalds 			break;
13841da177e4SLinus Torvalds 		case S_IFDIR:
1385d8c76e6fSDave Hansen 			inc_nlink(inode);
13861da177e4SLinus Torvalds 			/* Some things misbehave if size == 0 on a directory */
13871da177e4SLinus Torvalds 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
13881da177e4SLinus Torvalds 			inode->i_op = &shmem_dir_inode_operations;
13891da177e4SLinus Torvalds 			inode->i_fop = &simple_dir_operations;
13901da177e4SLinus Torvalds 			break;
13911da177e4SLinus Torvalds 		case S_IFLNK:
13921da177e4SLinus Torvalds 			/*
13931da177e4SLinus Torvalds 			 * Must not load anything in the rbtree,
13941da177e4SLinus Torvalds 			 * mpol_free_shared_policy will not be called.
13951da177e4SLinus Torvalds 			 */
139671fe804bSLee Schermerhorn 			mpol_shared_policy_init(&info->policy, NULL);
13971da177e4SLinus Torvalds 			break;
13981da177e4SLinus Torvalds 		}
13995b04c689SPavel Emelyanov 	} else
14005b04c689SPavel Emelyanov 		shmem_free_inode(sb);
14011da177e4SLinus Torvalds 	return inode;
14021da177e4SLinus Torvalds }
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
140592e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations;
140669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations;
14071da177e4SLinus Torvalds 
14086d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR
14096d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
14106d9d88d0SJarkko Sakkinen #else
14116d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL
14126d9d88d0SJarkko Sakkinen #endif
14136d9d88d0SJarkko Sakkinen 
14141da177e4SLinus Torvalds static int
1415800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping,
1416800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1417800d15a5SNick Piggin 			struct page **pagep, void **fsdata)
14181da177e4SLinus Torvalds {
1419800d15a5SNick Piggin 	struct inode *inode = mapping->host;
1420800d15a5SNick Piggin 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1421800d15a5SNick Piggin 	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1422800d15a5SNick Piggin }
1423800d15a5SNick Piggin 
1424800d15a5SNick Piggin static int
1425800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping,
1426800d15a5SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
1427800d15a5SNick Piggin 			struct page *page, void *fsdata)
1428800d15a5SNick Piggin {
1429800d15a5SNick Piggin 	struct inode *inode = mapping->host;
1430800d15a5SNick Piggin 
1431800d15a5SNick Piggin 	if (pos + copied > inode->i_size)
1432800d15a5SNick Piggin 		i_size_write(inode, pos + copied);
1433800d15a5SNick Piggin 
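	/*
	 * A !PageUptodate page here was handed out by SGP_WRITE without
	 * being cleared; if the copy did not cover the whole page, zero the
	 * bytes outside the copied range before declaring it uptodate, so
	 * stale data can never be read back.
	 */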
1434ec9516fbSHugh Dickins 	if (!PageUptodate(page)) {
1435ec9516fbSHugh Dickins 		if (copied < PAGE_CACHE_SIZE) {
1436ec9516fbSHugh Dickins 			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1437ec9516fbSHugh Dickins 			zero_user_segments(page, 0, from,
1438ec9516fbSHugh Dickins 					from + copied, PAGE_CACHE_SIZE);
1439ec9516fbSHugh Dickins 		}
1440ec9516fbSHugh Dickins 		SetPageUptodate(page);
1441ec9516fbSHugh Dickins 	}
1442d3602444SHugh Dickins 	set_page_dirty(page);
14436746aff7SWu Fengguang 	unlock_page(page);
1444d3602444SHugh Dickins 	page_cache_release(page);
1445d3602444SHugh Dickins 
1446800d15a5SNick Piggin 	return copied;
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
14491da177e4SLinus Torvalds static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
14501da177e4SLinus Torvalds {
1451d3ac7f89SJosef "Jeff" Sipek 	struct inode *inode = filp->f_path.dentry->d_inode;
14521da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
145341ffe5d5SHugh Dickins 	pgoff_t index;
145441ffe5d5SHugh Dickins 	unsigned long offset;
1455a0ee5ec5SHugh Dickins 	enum sgp_type sgp = SGP_READ;
1456a0ee5ec5SHugh Dickins 
1457a0ee5ec5SHugh Dickins 	/*
1458a0ee5ec5SHugh Dickins 	 * Might this read be for a stacking filesystem?  Then when reading
1459a0ee5ec5SHugh Dickins 	 * holes of a sparse file, we actually need to allocate those pages,
1460a0ee5ec5SHugh Dickins 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1461a0ee5ec5SHugh Dickins 	 */
1462a0ee5ec5SHugh Dickins 	if (segment_eq(get_fs(), KERNEL_DS))
1463a0ee5ec5SHugh Dickins 		sgp = SGP_DIRTY;
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds 	index = *ppos >> PAGE_CACHE_SHIFT;
14661da177e4SLinus Torvalds 	offset = *ppos & ~PAGE_CACHE_MASK;
14671da177e4SLinus Torvalds 
14681da177e4SLinus Torvalds 	for (;;) {
14691da177e4SLinus Torvalds 		struct page *page = NULL;
147041ffe5d5SHugh Dickins 		pgoff_t end_index;
147141ffe5d5SHugh Dickins 		unsigned long nr, ret;
14721da177e4SLinus Torvalds 		loff_t i_size = i_size_read(inode);
14731da177e4SLinus Torvalds 
14741da177e4SLinus Torvalds 		end_index = i_size >> PAGE_CACHE_SHIFT;
14751da177e4SLinus Torvalds 		if (index > end_index)
14761da177e4SLinus Torvalds 			break;
14771da177e4SLinus Torvalds 		if (index == end_index) {
14781da177e4SLinus Torvalds 			nr = i_size & ~PAGE_CACHE_MASK;
14791da177e4SLinus Torvalds 			if (nr <= offset)
14801da177e4SLinus Torvalds 				break;
14811da177e4SLinus Torvalds 		}
14821da177e4SLinus Torvalds 
1483a0ee5ec5SHugh Dickins 		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
14841da177e4SLinus Torvalds 		if (desc->error) {
14851da177e4SLinus Torvalds 			if (desc->error == -EINVAL)
14861da177e4SLinus Torvalds 				desc->error = 0;
14871da177e4SLinus Torvalds 			break;
14881da177e4SLinus Torvalds 		}
1489d3602444SHugh Dickins 		if (page)
1490d3602444SHugh Dickins 			unlock_page(page);
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds 		/*
14931da177e4SLinus Torvalds 		 * We must evaluate i_size afterwards, since reads (unlike writes)
14941b1dcc1bSJes Sorensen 		 * are called without i_mutex protection against truncate
14951da177e4SLinus Torvalds 		 */
14961da177e4SLinus Torvalds 		nr = PAGE_CACHE_SIZE;
14971da177e4SLinus Torvalds 		i_size = i_size_read(inode);
14981da177e4SLinus Torvalds 		end_index = i_size >> PAGE_CACHE_SHIFT;
14991da177e4SLinus Torvalds 		if (index == end_index) {
15001da177e4SLinus Torvalds 			nr = i_size & ~PAGE_CACHE_MASK;
15011da177e4SLinus Torvalds 			if (nr <= offset) {
15021da177e4SLinus Torvalds 				if (page)
15031da177e4SLinus Torvalds 					page_cache_release(page);
15041da177e4SLinus Torvalds 				break;
15051da177e4SLinus Torvalds 			}
15061da177e4SLinus Torvalds 		}
15071da177e4SLinus Torvalds 		nr -= offset;
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 		if (page) {
15101da177e4SLinus Torvalds 			/*
15111da177e4SLinus Torvalds 			 * If users can be writing to this page using arbitrary
15121da177e4SLinus Torvalds 			 * virtual addresses, take care about potential aliasing
15131da177e4SLinus Torvalds 			 * before reading the page on the kernel side.
15141da177e4SLinus Torvalds 			 */
15151da177e4SLinus Torvalds 			if (mapping_writably_mapped(mapping))
15161da177e4SLinus Torvalds 				flush_dcache_page(page);
15171da177e4SLinus Torvalds 			/*
15181da177e4SLinus Torvalds 			 * Mark the page accessed if we read the beginning.
15191da177e4SLinus Torvalds 			 */
15201da177e4SLinus Torvalds 			if (!offset)
15211da177e4SLinus Torvalds 				mark_page_accessed(page);
1522b5810039SNick Piggin 		} else {
15231da177e4SLinus Torvalds 			page = ZERO_PAGE(0);
1524b5810039SNick Piggin 			page_cache_get(page);
1525b5810039SNick Piggin 		}
15261da177e4SLinus Torvalds 
15271da177e4SLinus Torvalds 		/*
15281da177e4SLinus Torvalds 		 * Ok, we have the page, and it's up-to-date, so
15291da177e4SLinus Torvalds 		 * now we can copy it to user space...
15301da177e4SLinus Torvalds 		 *
15311da177e4SLinus Torvalds 		 * The actor routine returns how many bytes were actually used..
15321da177e4SLinus Torvalds 		 * NOTE! This may not be the same as how much of a user buffer
15331da177e4SLinus Torvalds 		 * we filled up (we may be padding etc), so we can only update
15341da177e4SLinus Torvalds 		 * "pos" here (the actor routine has to update the user buffer
15351da177e4SLinus Torvalds 		 * pointers and the remaining count).
15361da177e4SLinus Torvalds 		 */
15371da177e4SLinus Torvalds 		ret = actor(desc, page, offset, nr);
15381da177e4SLinus Torvalds 		offset += ret;
15391da177e4SLinus Torvalds 		index += offset >> PAGE_CACHE_SHIFT;
15401da177e4SLinus Torvalds 		offset &= ~PAGE_CACHE_MASK;
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds 		page_cache_release(page);
15431da177e4SLinus Torvalds 		if (ret != nr || !desc->count)
15441da177e4SLinus Torvalds 			break;
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds 		cond_resched();
15471da177e4SLinus Torvalds 	}
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
15501da177e4SLinus Torvalds 	file_accessed(filp);
15511da177e4SLinus Torvalds }
15521da177e4SLinus Torvalds 
1553bcd78e49SHugh Dickins static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1554bcd78e49SHugh Dickins 		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
15551da177e4SLinus Torvalds {
1556bcd78e49SHugh Dickins 	struct file *filp = iocb->ki_filp;
1557bcd78e49SHugh Dickins 	ssize_t retval;
1558bcd78e49SHugh Dickins 	unsigned long seg;
1559bcd78e49SHugh Dickins 	size_t count;
1560bcd78e49SHugh Dickins 	loff_t *ppos = &iocb->ki_pos;
1561bcd78e49SHugh Dickins 
1562bcd78e49SHugh Dickins 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1563bcd78e49SHugh Dickins 	if (retval)
1564bcd78e49SHugh Dickins 		return retval;
1565bcd78e49SHugh Dickins 
1566bcd78e49SHugh Dickins 	for (seg = 0; seg < nr_segs; seg++) {
15671da177e4SLinus Torvalds 		read_descriptor_t desc;
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 		desc.written = 0;
1570bcd78e49SHugh Dickins 		desc.arg.buf = iov[seg].iov_base;
1571bcd78e49SHugh Dickins 		desc.count = iov[seg].iov_len;
1572bcd78e49SHugh Dickins 		if (desc.count == 0)
1573bcd78e49SHugh Dickins 			continue;
15741da177e4SLinus Torvalds 		desc.error = 0;
15751da177e4SLinus Torvalds 		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1576bcd78e49SHugh Dickins 		retval += desc.written;
1577bcd78e49SHugh Dickins 		if (desc.error) {
1578bcd78e49SHugh Dickins 			retval = retval ?: desc.error;
1579bcd78e49SHugh Dickins 			break;
1580bcd78e49SHugh Dickins 		}
1581bcd78e49SHugh Dickins 		if (desc.count > 0)
1582bcd78e49SHugh Dickins 			break;
1583bcd78e49SHugh Dickins 	}
1584bcd78e49SHugh Dickins 	return retval;
15851da177e4SLinus Torvalds }
15861da177e4SLinus Torvalds 
1587708e3508SHugh Dickins static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1588708e3508SHugh Dickins 				struct pipe_inode_info *pipe, size_t len,
1589708e3508SHugh Dickins 				unsigned int flags)
1590708e3508SHugh Dickins {
1591708e3508SHugh Dickins 	struct address_space *mapping = in->f_mapping;
159271f0e07aSHugh Dickins 	struct inode *inode = mapping->host;
1593708e3508SHugh Dickins 	unsigned int loff, nr_pages, req_pages;
1594708e3508SHugh Dickins 	struct page *pages[PIPE_DEF_BUFFERS];
1595708e3508SHugh Dickins 	struct partial_page partial[PIPE_DEF_BUFFERS];
1596708e3508SHugh Dickins 	struct page *page;
1597708e3508SHugh Dickins 	pgoff_t index, end_index;
1598708e3508SHugh Dickins 	loff_t isize, left;
1599708e3508SHugh Dickins 	int error, page_nr;
1600708e3508SHugh Dickins 	struct splice_pipe_desc spd = {
1601708e3508SHugh Dickins 		.pages = pages,
1602708e3508SHugh Dickins 		.partial = partial,
1603047fe360SEric Dumazet 		.nr_pages_max = PIPE_DEF_BUFFERS,
1604708e3508SHugh Dickins 		.flags = flags,
1605708e3508SHugh Dickins 		.ops = &page_cache_pipe_buf_ops,
1606708e3508SHugh Dickins 		.spd_release = spd_release_page,
1607708e3508SHugh Dickins 	};
1608708e3508SHugh Dickins 
160971f0e07aSHugh Dickins 	isize = i_size_read(inode);
1610708e3508SHugh Dickins 	if (unlikely(*ppos >= isize))
1611708e3508SHugh Dickins 		return 0;
1612708e3508SHugh Dickins 
1613708e3508SHugh Dickins 	left = isize - *ppos;
1614708e3508SHugh Dickins 	if (unlikely(left < len))
1615708e3508SHugh Dickins 		len = left;
1616708e3508SHugh Dickins 
1617708e3508SHugh Dickins 	if (splice_grow_spd(pipe, &spd))
1618708e3508SHugh Dickins 		return -ENOMEM;
1619708e3508SHugh Dickins 
1620708e3508SHugh Dickins 	index = *ppos >> PAGE_CACHE_SHIFT;
1621708e3508SHugh Dickins 	loff = *ppos & ~PAGE_CACHE_MASK;
1622708e3508SHugh Dickins 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1623708e3508SHugh Dickins 	nr_pages = min(req_pages, pipe->buffers);
1624708e3508SHugh Dickins 
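	/*
	 * Grab the pages already resident in the page cache first:
	 * find_get_pages_contig() stops at the first gap, and the loop below
	 * uses shmem_getpage() to bring in (or allocate) whatever is missing.
	 */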
1625708e3508SHugh Dickins 	spd.nr_pages = find_get_pages_contig(mapping, index,
1626708e3508SHugh Dickins 						nr_pages, spd.pages);
1627708e3508SHugh Dickins 	index += spd.nr_pages;
1628708e3508SHugh Dickins 	error = 0;
162971f0e07aSHugh Dickins 
1630708e3508SHugh Dickins 	while (spd.nr_pages < nr_pages) {
163171f0e07aSHugh Dickins 		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
163271f0e07aSHugh Dickins 		if (error)
1633708e3508SHugh Dickins 			break;
1634708e3508SHugh Dickins 		unlock_page(page);
1635708e3508SHugh Dickins 		spd.pages[spd.nr_pages++] = page;
1636708e3508SHugh Dickins 		index++;
1637708e3508SHugh Dickins 	}
1638708e3508SHugh Dickins 
1639708e3508SHugh Dickins 	index = *ppos >> PAGE_CACHE_SHIFT;
1640708e3508SHugh Dickins 	nr_pages = spd.nr_pages;
1641708e3508SHugh Dickins 	spd.nr_pages = 0;
164271f0e07aSHugh Dickins 
1643708e3508SHugh Dickins 	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1644708e3508SHugh Dickins 		unsigned int this_len;
1645708e3508SHugh Dickins 
1646708e3508SHugh Dickins 		if (!len)
1647708e3508SHugh Dickins 			break;
1648708e3508SHugh Dickins 
1649708e3508SHugh Dickins 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1650708e3508SHugh Dickins 		page = spd.pages[page_nr];
1651708e3508SHugh Dickins 
165271f0e07aSHugh Dickins 		if (!PageUptodate(page) || page->mapping != mapping) {
165371f0e07aSHugh Dickins 			error = shmem_getpage(inode, index, &page,
165471f0e07aSHugh Dickins 							SGP_CACHE, NULL);
165571f0e07aSHugh Dickins 			if (error)
1656708e3508SHugh Dickins 				break;
165771f0e07aSHugh Dickins 			unlock_page(page);
1658708e3508SHugh Dickins 			page_cache_release(spd.pages[page_nr]);
1659708e3508SHugh Dickins 			spd.pages[page_nr] = page;
1660708e3508SHugh Dickins 		}
1661708e3508SHugh Dickins 
166271f0e07aSHugh Dickins 		isize = i_size_read(inode);
1663708e3508SHugh Dickins 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1664708e3508SHugh Dickins 		if (unlikely(!isize || index > end_index))
1665708e3508SHugh Dickins 			break;
1666708e3508SHugh Dickins 
1667708e3508SHugh Dickins 		if (end_index == index) {
1668708e3508SHugh Dickins 			unsigned int plen;
1669708e3508SHugh Dickins 
1670708e3508SHugh Dickins 			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1671708e3508SHugh Dickins 			if (plen <= loff)
1672708e3508SHugh Dickins 				break;
1673708e3508SHugh Dickins 
1674708e3508SHugh Dickins 			this_len = min(this_len, plen - loff);
1675708e3508SHugh Dickins 			len = this_len;
1676708e3508SHugh Dickins 		}
1677708e3508SHugh Dickins 
1678708e3508SHugh Dickins 		spd.partial[page_nr].offset = loff;
1679708e3508SHugh Dickins 		spd.partial[page_nr].len = this_len;
1680708e3508SHugh Dickins 		len -= this_len;
1681708e3508SHugh Dickins 		loff = 0;
1682708e3508SHugh Dickins 		spd.nr_pages++;
1683708e3508SHugh Dickins 		index++;
1684708e3508SHugh Dickins 	}
1685708e3508SHugh Dickins 
1686708e3508SHugh Dickins 	while (page_nr < nr_pages)
1687708e3508SHugh Dickins 		page_cache_release(spd.pages[page_nr++]);
1688708e3508SHugh Dickins 
1689708e3508SHugh Dickins 	if (spd.nr_pages)
1690708e3508SHugh Dickins 		error = splice_to_pipe(pipe, &spd);
1691708e3508SHugh Dickins 
1692047fe360SEric Dumazet 	splice_shrink_spd(&spd);
1693708e3508SHugh Dickins 
1694708e3508SHugh Dickins 	if (error > 0) {
1695708e3508SHugh Dickins 		*ppos += error;
1696708e3508SHugh Dickins 		file_accessed(in);
1697708e3508SHugh Dickins 	}
1698708e3508SHugh Dickins 	return error;
1699708e3508SHugh Dickins }
1700708e3508SHugh Dickins 
170183e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset,
170283e4fa9cSHugh Dickins 							 loff_t len)
170383e4fa9cSHugh Dickins {
170483e4fa9cSHugh Dickins 	struct inode *inode = file->f_path.dentry->d_inode;
1705e2d12e22SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
17061aac1400SHugh Dickins 	struct shmem_falloc shmem_falloc;
1707e2d12e22SHugh Dickins 	pgoff_t start, index, end;
1708e2d12e22SHugh Dickins 	int error;
170983e4fa9cSHugh Dickins 
171083e4fa9cSHugh Dickins 	mutex_lock(&inode->i_mutex);
171183e4fa9cSHugh Dickins 
171283e4fa9cSHugh Dickins 	if (mode & FALLOC_FL_PUNCH_HOLE) {
171383e4fa9cSHugh Dickins 		struct address_space *mapping = file->f_mapping;
171483e4fa9cSHugh Dickins 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
171583e4fa9cSHugh Dickins 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
171683e4fa9cSHugh Dickins 
171783e4fa9cSHugh Dickins 		if ((u64)unmap_end > (u64)unmap_start)
171883e4fa9cSHugh Dickins 			unmap_mapping_range(mapping, unmap_start,
171983e4fa9cSHugh Dickins 					    1 + unmap_end - unmap_start, 0);
172083e4fa9cSHugh Dickins 		shmem_truncate_range(inode, offset, offset + len - 1);
172183e4fa9cSHugh Dickins 		/* No need to unmap again: hole-punching leaves COWed pages */
172283e4fa9cSHugh Dickins 		error = 0;
1723e2d12e22SHugh Dickins 		goto out;
172483e4fa9cSHugh Dickins 	}
172583e4fa9cSHugh Dickins 
1726e2d12e22SHugh Dickins 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
1727e2d12e22SHugh Dickins 	error = inode_newsize_ok(inode, offset + len);
1728e2d12e22SHugh Dickins 	if (error)
1729e2d12e22SHugh Dickins 		goto out;
1730e2d12e22SHugh Dickins 
1731e2d12e22SHugh Dickins 	start = offset >> PAGE_CACHE_SHIFT;
1732e2d12e22SHugh Dickins 	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1733e2d12e22SHugh Dickins 	/* Try to avoid a swapstorm if len is impossible to satisfy */
1734e2d12e22SHugh Dickins 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
1735e2d12e22SHugh Dickins 		error = -ENOSPC;
1736e2d12e22SHugh Dickins 		goto out;
1737e2d12e22SHugh Dickins 	}
1738e2d12e22SHugh Dickins 
17391aac1400SHugh Dickins 	shmem_falloc.start = start;
17401aac1400SHugh Dickins 	shmem_falloc.next  = start;
17411aac1400SHugh Dickins 	shmem_falloc.nr_falloced = 0;
17421aac1400SHugh Dickins 	shmem_falloc.nr_unswapped = 0;
17431aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
17441aac1400SHugh Dickins 	inode->i_private = &shmem_falloc;
17451aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
17461aac1400SHugh Dickins 
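	/*
	 * inode->i_private now points shmem_writepage() at this bookkeeping:
	 * nr_falloced counts the new pages instantiated below, while
	 * nr_unswapped grows when writeback declines to swap one of them out.
	 * Once unswapped overtakes falloced, reclaim is undoing our work as
	 * fast as we do it, and the loop gives up with -ENOMEM.
	 */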
1747e2d12e22SHugh Dickins 	for (index = start; index < end; index++) {
1748e2d12e22SHugh Dickins 		struct page *page;
1749e2d12e22SHugh Dickins 
1750e2d12e22SHugh Dickins 		/*
1751e2d12e22SHugh Dickins 		 * Good, the fallocate(2) manpage permits EINTR: we may have
1752e2d12e22SHugh Dickins 		 * been interrupted because we are using up too much memory.
1753e2d12e22SHugh Dickins 		 */
1754e2d12e22SHugh Dickins 		if (signal_pending(current))
1755e2d12e22SHugh Dickins 			error = -EINTR;
17561aac1400SHugh Dickins 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
17571aac1400SHugh Dickins 			error = -ENOMEM;
1758e2d12e22SHugh Dickins 		else
17591635f6a7SHugh Dickins 			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
1760e2d12e22SHugh Dickins 									NULL);
1761e2d12e22SHugh Dickins 		if (error) {
17621635f6a7SHugh Dickins 			/* Remove the !PageUptodate pages we added */
17631635f6a7SHugh Dickins 			shmem_undo_range(inode,
17641635f6a7SHugh Dickins 				(loff_t)start << PAGE_CACHE_SHIFT,
17651635f6a7SHugh Dickins 				(loff_t)index << PAGE_CACHE_SHIFT, true);
17661aac1400SHugh Dickins 			goto undone;
1767e2d12e22SHugh Dickins 		}
1768e2d12e22SHugh Dickins 
1769e2d12e22SHugh Dickins 		/*
17701aac1400SHugh Dickins 		 * Inform shmem_writepage() how far we have reached.
17711aac1400SHugh Dickins 		 * No need for lock or barrier: we have the page lock.
17721aac1400SHugh Dickins 		 */
17731aac1400SHugh Dickins 		shmem_falloc.next++;
17741aac1400SHugh Dickins 		if (!PageUptodate(page))
17751aac1400SHugh Dickins 			shmem_falloc.nr_falloced++;
17761aac1400SHugh Dickins 
17771aac1400SHugh Dickins 		/*
17781635f6a7SHugh Dickins 		 * If !PageUptodate, leave it that way so that freeable pages
17791635f6a7SHugh Dickins 		 * can be recognized if we need to rollback on error later.
17801635f6a7SHugh Dickins 		 * But set_page_dirty so that memory pressure will swap rather
1781e2d12e22SHugh Dickins 		 * than free the pages we are allocating (and SGP_CACHE pages
1782e2d12e22SHugh Dickins 		 * might still be clean: we now need to mark those dirty too).
1783e2d12e22SHugh Dickins 		 */
1784e2d12e22SHugh Dickins 		set_page_dirty(page);
1785e2d12e22SHugh Dickins 		unlock_page(page);
1786e2d12e22SHugh Dickins 		page_cache_release(page);
1787e2d12e22SHugh Dickins 		cond_resched();
1788e2d12e22SHugh Dickins 	}
1789e2d12e22SHugh Dickins 
1790e2d12e22SHugh Dickins 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
1791e2d12e22SHugh Dickins 		i_size_write(inode, offset + len);
1792e2d12e22SHugh Dickins 	inode->i_ctime = CURRENT_TIME;
17931aac1400SHugh Dickins undone:
17941aac1400SHugh Dickins 	spin_lock(&inode->i_lock);
17951aac1400SHugh Dickins 	inode->i_private = NULL;
17961aac1400SHugh Dickins 	spin_unlock(&inode->i_lock);
1797e2d12e22SHugh Dickins out:
179883e4fa9cSHugh Dickins 	mutex_unlock(&inode->i_mutex);
179983e4fa9cSHugh Dickins 	return error;
180083e4fa9cSHugh Dickins }
180183e4fa9cSHugh Dickins 
1802726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
18031da177e4SLinus Torvalds {
1804726c3342SDavid Howells 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
18051da177e4SLinus Torvalds 
18061da177e4SLinus Torvalds 	buf->f_type = TMPFS_MAGIC;
18071da177e4SLinus Torvalds 	buf->f_bsize = PAGE_CACHE_SIZE;
18081da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
18090edd73b3SHugh Dickins 	if (sbinfo->max_blocks) {
18101da177e4SLinus Torvalds 		buf->f_blocks = sbinfo->max_blocks;
181141ffe5d5SHugh Dickins 		buf->f_bavail =
181241ffe5d5SHugh Dickins 		buf->f_bfree  = sbinfo->max_blocks -
181341ffe5d5SHugh Dickins 				percpu_counter_sum(&sbinfo->used_blocks);
18140edd73b3SHugh Dickins 	}
18150edd73b3SHugh Dickins 	if (sbinfo->max_inodes) {
18161da177e4SLinus Torvalds 		buf->f_files = sbinfo->max_inodes;
18171da177e4SLinus Torvalds 		buf->f_ffree = sbinfo->free_inodes;
18181da177e4SLinus Torvalds 	}
18191da177e4SLinus Torvalds 	/* else leave those fields 0 like simple_statfs */
18201da177e4SLinus Torvalds 	return 0;
18211da177e4SLinus Torvalds }
18221da177e4SLinus Torvalds 
18231da177e4SLinus Torvalds /*
18241da177e4SLinus Torvalds  * File creation. Allocate an inode, and we're done..
18251da177e4SLinus Torvalds  */
18261da177e4SLinus Torvalds static int
18271a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
18281da177e4SLinus Torvalds {
18290b0a0806SHugh Dickins 	struct inode *inode;
18301da177e4SLinus Torvalds 	int error = -ENOSPC;
18311da177e4SLinus Torvalds 
1832454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
18331da177e4SLinus Torvalds 	if (inode) {
18342a7dba39SEric Paris 		error = security_inode_init_security(inode, dir,
18359d8f13baSMimi Zohar 						     &dentry->d_name,
18366d9d88d0SJarkko Sakkinen 						     shmem_initxattrs, NULL);
1837570bc1c2SStephen Smalley 		if (error) {
1838570bc1c2SStephen Smalley 			if (error != -EOPNOTSUPP) {
1839570bc1c2SStephen Smalley 				iput(inode);
1840570bc1c2SStephen Smalley 				return error;
1841570bc1c2SStephen Smalley 			}
184239f0247dSAndreas Gruenbacher 		}
18431c7c474cSChristoph Hellwig #ifdef CONFIG_TMPFS_POSIX_ACL
18441c7c474cSChristoph Hellwig 		error = generic_acl_init(inode, dir);
184539f0247dSAndreas Gruenbacher 		if (error) {
184639f0247dSAndreas Gruenbacher 			iput(inode);
184739f0247dSAndreas Gruenbacher 			return error;
1848570bc1c2SStephen Smalley 		}
1849718deb6bSAl Viro #else
1850718deb6bSAl Viro 		error = 0;
18511c7c474cSChristoph Hellwig #endif
18521da177e4SLinus Torvalds 		dir->i_size += BOGO_DIRENT_SIZE;
18531da177e4SLinus Torvalds 		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
18541da177e4SLinus Torvalds 		d_instantiate(dentry, inode);
18551da177e4SLinus Torvalds 		dget(dentry); /* Extra count - pin the dentry in core */
18561da177e4SLinus Torvalds 	}
18571da177e4SLinus Torvalds 	return error;
18581da177e4SLinus Torvalds }
18591da177e4SLinus Torvalds 
186018bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
18611da177e4SLinus Torvalds {
18621da177e4SLinus Torvalds 	int error;
18631da177e4SLinus Torvalds 
18641da177e4SLinus Torvalds 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
18651da177e4SLinus Torvalds 		return error;
1866d8c76e6fSDave Hansen 	inc_nlink(dir);
18671da177e4SLinus Torvalds 	return 0;
18681da177e4SLinus Torvalds }
18691da177e4SLinus Torvalds 
18704acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1871ebfc3b49SAl Viro 		bool excl)
18721da177e4SLinus Torvalds {
18731da177e4SLinus Torvalds 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
18741da177e4SLinus Torvalds }
18751da177e4SLinus Torvalds 
18761da177e4SLinus Torvalds /*
18771da177e4SLinus Torvalds  * Link a file..
18781da177e4SLinus Torvalds  */
18791da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
18801da177e4SLinus Torvalds {
18811da177e4SLinus Torvalds 	struct inode *inode = old_dentry->d_inode;
18825b04c689SPavel Emelyanov 	int ret;
18831da177e4SLinus Torvalds 
18841da177e4SLinus Torvalds 	/*
18851da177e4SLinus Torvalds 	 * No ordinary (disk based) filesystem counts links as inodes;
18861da177e4SLinus Torvalds 	 * but each new link needs a new dentry, pinning lowmem, and
18871da177e4SLinus Torvalds 	 * tmpfs dentries cannot be pruned until they are unlinked.
18881da177e4SLinus Torvalds 	 */
18895b04c689SPavel Emelyanov 	ret = shmem_reserve_inode(inode->i_sb);
18905b04c689SPavel Emelyanov 	if (ret)
18915b04c689SPavel Emelyanov 		goto out;
18921da177e4SLinus Torvalds 
18931da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
18941da177e4SLinus Torvalds 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1895d8c76e6fSDave Hansen 	inc_nlink(inode);
18967de9c6eeSAl Viro 	ihold(inode);	/* New dentry reference */
18971da177e4SLinus Torvalds 	dget(dentry);		/* Extra pinning count for the created dentry */
18981da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
18995b04c689SPavel Emelyanov out:
19005b04c689SPavel Emelyanov 	return ret;
19011da177e4SLinus Torvalds }
19021da177e4SLinus Torvalds 
19031da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry)
19041da177e4SLinus Torvalds {
19051da177e4SLinus Torvalds 	struct inode *inode = dentry->d_inode;
19061da177e4SLinus Torvalds 
19075b04c689SPavel Emelyanov 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
19085b04c689SPavel Emelyanov 		shmem_free_inode(inode->i_sb);
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds 	dir->i_size -= BOGO_DIRENT_SIZE;
19111da177e4SLinus Torvalds 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
19129a53c3a7SDave Hansen 	drop_nlink(inode);
19131da177e4SLinus Torvalds 	dput(dentry);	/* Undo the count from "create" - this does all the work */
19141da177e4SLinus Torvalds 	return 0;
19151da177e4SLinus Torvalds }
19161da177e4SLinus Torvalds 
19171da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
19181da177e4SLinus Torvalds {
19191da177e4SLinus Torvalds 	if (!simple_empty(dentry))
19201da177e4SLinus Torvalds 		return -ENOTEMPTY;
19211da177e4SLinus Torvalds 
19229a53c3a7SDave Hansen 	drop_nlink(dentry->d_inode);
19239a53c3a7SDave Hansen 	drop_nlink(dir);
19241da177e4SLinus Torvalds 	return shmem_unlink(dir, dentry);
19251da177e4SLinus Torvalds }
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds /*
19281da177e4SLinus Torvalds  * The VFS layer already does all the dentry stuff for rename,
19291da177e4SLinus Torvalds  * we just have to decrement the usage count for the target if
19301da177e4SLinus Torvalds  * it exists so that the VFS layer correctly frees it when it
19311da177e4SLinus Torvalds  * gets overwritten.
19321da177e4SLinus Torvalds  */
19331da177e4SLinus Torvalds static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
19341da177e4SLinus Torvalds {
19351da177e4SLinus Torvalds 	struct inode *inode = old_dentry->d_inode;
19361da177e4SLinus Torvalds 	int they_are_dirs = S_ISDIR(inode->i_mode);
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds 	if (!simple_empty(new_dentry))
19391da177e4SLinus Torvalds 		return -ENOTEMPTY;
19401da177e4SLinus Torvalds 
19411da177e4SLinus Torvalds 	if (new_dentry->d_inode) {
19421da177e4SLinus Torvalds 		(void) shmem_unlink(new_dir, new_dentry);
19431da177e4SLinus Torvalds 		if (they_are_dirs)
19449a53c3a7SDave Hansen 			drop_nlink(old_dir);
19451da177e4SLinus Torvalds 	} else if (they_are_dirs) {
19469a53c3a7SDave Hansen 		drop_nlink(old_dir);
1947d8c76e6fSDave Hansen 		inc_nlink(new_dir);
19481da177e4SLinus Torvalds 	}
19491da177e4SLinus Torvalds 
19501da177e4SLinus Torvalds 	old_dir->i_size -= BOGO_DIRENT_SIZE;
19511da177e4SLinus Torvalds 	new_dir->i_size += BOGO_DIRENT_SIZE;
19521da177e4SLinus Torvalds 	old_dir->i_ctime = old_dir->i_mtime =
19531da177e4SLinus Torvalds 	new_dir->i_ctime = new_dir->i_mtime =
19541da177e4SLinus Torvalds 	inode->i_ctime = CURRENT_TIME;
19551da177e4SLinus Torvalds 	return 0;
19561da177e4SLinus Torvalds }
19571da177e4SLinus Torvalds 
19581da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
19591da177e4SLinus Torvalds {
19601da177e4SLinus Torvalds 	int error;
19611da177e4SLinus Torvalds 	int len;
19621da177e4SLinus Torvalds 	struct inode *inode;
19639276aad6SHugh Dickins 	struct page *page;
19641da177e4SLinus Torvalds 	char *kaddr;
19651da177e4SLinus Torvalds 	struct shmem_inode_info *info;
19661da177e4SLinus Torvalds 
19671da177e4SLinus Torvalds 	len = strlen(symname) + 1;
19681da177e4SLinus Torvalds 	if (len > PAGE_CACHE_SIZE)
19691da177e4SLinus Torvalds 		return -ENAMETOOLONG;
19701da177e4SLinus Torvalds 
1971454abafeSDmitry Monakhov 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
19721da177e4SLinus Torvalds 	if (!inode)
19731da177e4SLinus Torvalds 		return -ENOSPC;
19741da177e4SLinus Torvalds 
19759d8f13baSMimi Zohar 	error = security_inode_init_security(inode, dir, &dentry->d_name,
19766d9d88d0SJarkko Sakkinen 					     shmem_initxattrs, NULL);
1977570bc1c2SStephen Smalley 	if (error) {
1978570bc1c2SStephen Smalley 		if (error != -EOPNOTSUPP) {
1979570bc1c2SStephen Smalley 			iput(inode);
1980570bc1c2SStephen Smalley 			return error;
1981570bc1c2SStephen Smalley 		}
1982570bc1c2SStephen Smalley 		error = 0;
1983570bc1c2SStephen Smalley 	}
1984570bc1c2SStephen Smalley 
19851da177e4SLinus Torvalds 	info = SHMEM_I(inode);
19861da177e4SLinus Torvalds 	inode->i_size = len-1;
198769f07ec9SHugh Dickins 	if (len <= SHORT_SYMLINK_LEN) {
198869f07ec9SHugh Dickins 		info->symlink = kmemdup(symname, len, GFP_KERNEL);
198969f07ec9SHugh Dickins 		if (!info->symlink) {
199069f07ec9SHugh Dickins 			iput(inode);
199169f07ec9SHugh Dickins 			return -ENOMEM;
199269f07ec9SHugh Dickins 		}
199369f07ec9SHugh Dickins 		inode->i_op = &shmem_short_symlink_operations;
19941da177e4SLinus Torvalds 	} else {
19951da177e4SLinus Torvalds 		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
19961da177e4SLinus Torvalds 		if (error) {
19971da177e4SLinus Torvalds 			iput(inode);
19981da177e4SLinus Torvalds 			return error;
19991da177e4SLinus Torvalds 		}
200014fcc23fSHugh Dickins 		inode->i_mapping->a_ops = &shmem_aops;
20011da177e4SLinus Torvalds 		inode->i_op = &shmem_symlink_inode_operations;
20029b04c5feSCong Wang 		kaddr = kmap_atomic(page);
20031da177e4SLinus Torvalds 		memcpy(kaddr, symname, len);
20049b04c5feSCong Wang 		kunmap_atomic(kaddr);
2005ec9516fbSHugh Dickins 		SetPageUptodate(page);
20061da177e4SLinus Torvalds 		set_page_dirty(page);
20076746aff7SWu Fengguang 		unlock_page(page);
20081da177e4SLinus Torvalds 		page_cache_release(page);
20091da177e4SLinus Torvalds 	}
20101da177e4SLinus Torvalds 	dir->i_size += BOGO_DIRENT_SIZE;
20111da177e4SLinus Torvalds 	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
20121da177e4SLinus Torvalds 	d_instantiate(dentry, inode);
20131da177e4SLinus Torvalds 	dget(dentry);
20141da177e4SLinus Torvalds 	return 0;
20151da177e4SLinus Torvalds }
20161da177e4SLinus Torvalds 
201769f07ec9SHugh Dickins static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
20181da177e4SLinus Torvalds {
201969f07ec9SHugh Dickins 	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
2020cc314eefSLinus Torvalds 	return NULL;
20211da177e4SLinus Torvalds }
20221da177e4SLinus Torvalds 
2023cc314eefSLinus Torvalds static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
20241da177e4SLinus Torvalds {
20251da177e4SLinus Torvalds 	struct page *page = NULL;
202641ffe5d5SHugh Dickins 	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
202741ffe5d5SHugh Dickins 	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
2028d3602444SHugh Dickins 	if (page)
2029d3602444SHugh Dickins 		unlock_page(page);
2030cc314eefSLinus Torvalds 	return page;
20311da177e4SLinus Torvalds }
20321da177e4SLinus Torvalds 
2033cc314eefSLinus Torvalds static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
20341da177e4SLinus Torvalds {
20351da177e4SLinus Torvalds 	if (!IS_ERR(nd_get_link(nd))) {
2036cc314eefSLinus Torvalds 		struct page *page = cookie;
20371da177e4SLinus Torvalds 		kunmap(page);
20381da177e4SLinus Torvalds 		mark_page_accessed(page);
20391da177e4SLinus Torvalds 		page_cache_release(page);
20401da177e4SLinus Torvalds 	}
20411da177e4SLinus Torvalds }
20421da177e4SLinus Torvalds 
2043b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2044b09e0fa4SEric Paris /*
2045b09e0fa4SEric Paris  * Superblocks without xattr inode operations may get some security.* xattr
2046b09e0fa4SEric Paris  * support from the LSM "for free". As soon as we have any other xattrs
2047b09e0fa4SEric Paris  * like ACLs, we also need to implement the security.* handlers at
2048b09e0fa4SEric Paris  * filesystem level, though.
2049b09e0fa4SEric Paris  */
2050b09e0fa4SEric Paris 
20516d9d88d0SJarkko Sakkinen /*
20526d9d88d0SJarkko Sakkinen  * Callback for security_inode_init_security() for acquiring xattrs.
20536d9d88d0SJarkko Sakkinen  */
20546d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode,
20556d9d88d0SJarkko Sakkinen 			    const struct xattr *xattr_array,
20566d9d88d0SJarkko Sakkinen 			    void *fs_info)
20576d9d88d0SJarkko Sakkinen {
20586d9d88d0SJarkko Sakkinen 	struct shmem_inode_info *info = SHMEM_I(inode);
20596d9d88d0SJarkko Sakkinen 	const struct xattr *xattr;
2060*38f38657SAristeu Rozanski 	struct simple_xattr *new_xattr;
20616d9d88d0SJarkko Sakkinen 	size_t len;
20626d9d88d0SJarkko Sakkinen 
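	/*
	 * The LSM supplies bare attribute names (e.g. "selinux"); each one
	 * is stored with the "security." prefix prepended, which is the name
	 * userspace will later use to read it back.
	 */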
20636d9d88d0SJarkko Sakkinen 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2064*38f38657SAristeu Rozanski 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
20656d9d88d0SJarkko Sakkinen 		if (!new_xattr)
20666d9d88d0SJarkko Sakkinen 			return -ENOMEM;
20676d9d88d0SJarkko Sakkinen 
20686d9d88d0SJarkko Sakkinen 		len = strlen(xattr->name) + 1;
20696d9d88d0SJarkko Sakkinen 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
20706d9d88d0SJarkko Sakkinen 					  GFP_KERNEL);
20716d9d88d0SJarkko Sakkinen 		if (!new_xattr->name) {
20726d9d88d0SJarkko Sakkinen 			kfree(new_xattr);
20736d9d88d0SJarkko Sakkinen 			return -ENOMEM;
20746d9d88d0SJarkko Sakkinen 		}
20756d9d88d0SJarkko Sakkinen 
20766d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
20776d9d88d0SJarkko Sakkinen 		       XATTR_SECURITY_PREFIX_LEN);
20786d9d88d0SJarkko Sakkinen 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
20796d9d88d0SJarkko Sakkinen 		       xattr->name, len);
20806d9d88d0SJarkko Sakkinen 
2081*38f38657SAristeu Rozanski 		simple_xattr_list_add(&info->xattrs, new_xattr);
20826d9d88d0SJarkko Sakkinen 	}
20836d9d88d0SJarkko Sakkinen 
20846d9d88d0SJarkko Sakkinen 	return 0;
20856d9d88d0SJarkko Sakkinen }
20866d9d88d0SJarkko Sakkinen 
2087b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = {
2088b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
2089b09e0fa4SEric Paris 	&generic_acl_access_handler,
2090b09e0fa4SEric Paris 	&generic_acl_default_handler,
2091b09e0fa4SEric Paris #endif
2092b09e0fa4SEric Paris 	NULL
2093b09e0fa4SEric Paris };
2094b09e0fa4SEric Paris 
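/*
 * Only "security.*" and "trusted.*" names are served from the shmem xattr
 * list: a bare prefix with no suffix is rejected with -EINVAL, and any
 * other namespace (e.g. "user.*") gets -EOPNOTSUPP.
 */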
2095b09e0fa4SEric Paris static int shmem_xattr_validate(const char *name)
2096b09e0fa4SEric Paris {
2097b09e0fa4SEric Paris 	struct { const char *prefix; size_t len; } arr[] = {
2098b09e0fa4SEric Paris 		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2099b09e0fa4SEric Paris 		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2100b09e0fa4SEric Paris 	};
2101b09e0fa4SEric Paris 	int i;
2102b09e0fa4SEric Paris 
2103b09e0fa4SEric Paris 	for (i = 0; i < ARRAY_SIZE(arr); i++) {
2104b09e0fa4SEric Paris 		size_t preflen = arr[i].len;
2105b09e0fa4SEric Paris 		if (strncmp(name, arr[i].prefix, preflen) == 0) {
2106b09e0fa4SEric Paris 			if (!name[preflen])
2107b09e0fa4SEric Paris 				return -EINVAL;
2108b09e0fa4SEric Paris 			return 0;
2109b09e0fa4SEric Paris 		}
2110b09e0fa4SEric Paris 	}
2111b09e0fa4SEric Paris 	return -EOPNOTSUPP;
2112b09e0fa4SEric Paris }
2113b09e0fa4SEric Paris 
2114b09e0fa4SEric Paris static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2115b09e0fa4SEric Paris 			      void *buffer, size_t size)
2116b09e0fa4SEric Paris {
2117*38f38657SAristeu Rozanski 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2118b09e0fa4SEric Paris 	int err;
2119b09e0fa4SEric Paris 
2120b09e0fa4SEric Paris 	/*
2121b09e0fa4SEric Paris 	 * If this is a request for a synthetic attribute in the system.*
2122b09e0fa4SEric Paris 	 * namespace use the generic infrastructure to resolve a handler
2123b09e0fa4SEric Paris 	 * for it via sb->s_xattr.
2124b09e0fa4SEric Paris 	 */
2125b09e0fa4SEric Paris 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2126b09e0fa4SEric Paris 		return generic_getxattr(dentry, name, buffer, size);
2127b09e0fa4SEric Paris 
2128b09e0fa4SEric Paris 	err = shmem_xattr_validate(name);
2129b09e0fa4SEric Paris 	if (err)
2130b09e0fa4SEric Paris 		return err;
2131b09e0fa4SEric Paris 
2132*38f38657SAristeu Rozanski 	return simple_xattr_get(&info->xattrs, name, buffer, size);
2133b09e0fa4SEric Paris }
2134b09e0fa4SEric Paris 
2135b09e0fa4SEric Paris static int shmem_setxattr(struct dentry *dentry, const char *name,
2136b09e0fa4SEric Paris 			  const void *value, size_t size, int flags)
2137b09e0fa4SEric Paris {
2138*38f38657SAristeu Rozanski 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2139b09e0fa4SEric Paris 	int err;
2140b09e0fa4SEric Paris 
2141b09e0fa4SEric Paris 	/*
2142b09e0fa4SEric Paris 	 * If this is a request for a synthetic attribute in the system.*
2143b09e0fa4SEric Paris 	 * namespace, use the generic infrastructure to resolve a handler
2144b09e0fa4SEric Paris 	 * for it via sb->s_xattr.
2145b09e0fa4SEric Paris 	 */
2146b09e0fa4SEric Paris 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2147b09e0fa4SEric Paris 		return generic_setxattr(dentry, name, value, size, flags);
2148b09e0fa4SEric Paris 
2149b09e0fa4SEric Paris 	err = shmem_xattr_validate(name);
2150b09e0fa4SEric Paris 	if (err)
2151b09e0fa4SEric Paris 		return err;
2152b09e0fa4SEric Paris 
2153*38f38657SAristeu Rozanski 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
2154b09e0fa4SEric Paris }
2155b09e0fa4SEric Paris 
2156b09e0fa4SEric Paris static int shmem_removexattr(struct dentry *dentry, const char *name)
2157b09e0fa4SEric Paris {
2158*38f38657SAristeu Rozanski 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2159b09e0fa4SEric Paris 	int err;
2160b09e0fa4SEric Paris 
2161b09e0fa4SEric Paris 	/*
2162b09e0fa4SEric Paris 	 * If this is a request for a synthetic attribute in the system.*
2163b09e0fa4SEric Paris 	 * namespace, use the generic infrastructure to resolve a handler
2164b09e0fa4SEric Paris 	 * for it via sb->s_xattr.
2165b09e0fa4SEric Paris 	 */
2166b09e0fa4SEric Paris 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2167b09e0fa4SEric Paris 		return generic_removexattr(dentry, name);
2168b09e0fa4SEric Paris 
2169b09e0fa4SEric Paris 	err = shmem_xattr_validate(name);
2170b09e0fa4SEric Paris 	if (err)
2171b09e0fa4SEric Paris 		return err;
2172b09e0fa4SEric Paris 
2173*38f38657SAristeu Rozanski 	return simple_xattr_remove(&info->xattrs, name);
2174b09e0fa4SEric Paris }
2175b09e0fa4SEric Paris 
2176b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2177b09e0fa4SEric Paris {
2178*38f38657SAristeu Rozanski 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2179*38f38657SAristeu Rozanski 	return simple_xattr_list(&info->xattrs, buffer, size);
2180b09e0fa4SEric Paris }
2181b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */
2182b09e0fa4SEric Paris 
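The xattr entry points above can be exercised from userspace. Below is a hedged sketch, assuming /dev/shm is a tmpfs mount and using made-up attribute names; setting trusted.* needs CAP_SYS_ADMIN. It illustrates shmem_xattr_validate(): names outside the security.* and trusted.* prefixes (with system.* routed to sb->s_xattr) are refused with EOPNOTSUPP.

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/dev/shm/xattr-demo";	/* assumes /dev/shm is a tmpfs mount */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fclose(f);

	/* "trusted." matches an allowed prefix (needs CAP_SYS_ADMIN) */
	if (setxattr(path, "trusted.demo", "1", 1, 0) != 0)
		perror("setxattr trusted.demo");

	/* "user." matches no allowed prefix: shmem_setxattr() returns -EOPNOTSUPP */
	if (setxattr(path, "user.demo", "1", 1, 0) != 0)
		perror("setxattr user.demo");	/* expected: Operation not supported */

	return 0;
}
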
218369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = {
21841da177e4SLinus Torvalds 	.readlink	= generic_readlink,
218569f07ec9SHugh Dickins 	.follow_link	= shmem_follow_short_symlink,
2186b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2187b09e0fa4SEric Paris 	.setxattr	= shmem_setxattr,
2188b09e0fa4SEric Paris 	.getxattr	= shmem_getxattr,
2189b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
2190b09e0fa4SEric Paris 	.removexattr	= shmem_removexattr,
2191b09e0fa4SEric Paris #endif
21921da177e4SLinus Torvalds };
21931da177e4SLinus Torvalds 
219492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = {
21951da177e4SLinus Torvalds 	.readlink	= generic_readlink,
21961da177e4SLinus Torvalds 	.follow_link	= shmem_follow_link,
21971da177e4SLinus Torvalds 	.put_link	= shmem_put_link,
2198b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2199b09e0fa4SEric Paris 	.setxattr	= shmem_setxattr,
2200b09e0fa4SEric Paris 	.getxattr	= shmem_getxattr,
2201b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
2202b09e0fa4SEric Paris 	.removexattr	= shmem_removexattr,
220339f0247dSAndreas Gruenbacher #endif
2204b09e0fa4SEric Paris };
220539f0247dSAndreas Gruenbacher 
220691828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child)
220791828a40SDavid M. Grimes {
220891828a40SDavid M. Grimes 	return ERR_PTR(-ESTALE);
220991828a40SDavid M. Grimes }
221091828a40SDavid M. Grimes 
221191828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh)
221291828a40SDavid M. Grimes {
221391828a40SDavid M. Grimes 	__u32 *fh = vfh;
221491828a40SDavid M. Grimes 	__u64 inum = fh[2];
221591828a40SDavid M. Grimes 	inum = (inum << 32) | fh[1];
221691828a40SDavid M. Grimes 	return ino->i_ino == inum && fh[0] == ino->i_generation;
221791828a40SDavid M. Grimes }
221891828a40SDavid M. Grimes 
2219480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2220480b116cSChristoph Hellwig 		struct fid *fid, int fh_len, int fh_type)
222191828a40SDavid M. Grimes {
222291828a40SDavid M. Grimes 	struct inode *inode;
2223480b116cSChristoph Hellwig 	struct dentry *dentry = NULL;
2224480b116cSChristoph Hellwig 	u64 inum = fid->raw[2];
2225480b116cSChristoph Hellwig 	inum = (inum << 32) | fid->raw[1];
222691828a40SDavid M. Grimes 
2227480b116cSChristoph Hellwig 	if (fh_len < 3)
2228480b116cSChristoph Hellwig 		return NULL;
2229480b116cSChristoph Hellwig 
2230480b116cSChristoph Hellwig 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2231480b116cSChristoph Hellwig 			shmem_match, fid->raw);
223291828a40SDavid M. Grimes 	if (inode) {
2233480b116cSChristoph Hellwig 		dentry = d_find_alias(inode);
223491828a40SDavid M. Grimes 		iput(inode);
223591828a40SDavid M. Grimes 	}
223691828a40SDavid M. Grimes 
2237480b116cSChristoph Hellwig 	return dentry;
223891828a40SDavid M. Grimes }
223991828a40SDavid M. Grimes 
2240b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2241b0b0382bSAl Viro 				struct inode *parent)
224291828a40SDavid M. Grimes {
22435fe0c237SAneesh Kumar K.V 	if (*len < 3) {
22445fe0c237SAneesh Kumar K.V 		*len = 3;
224591828a40SDavid M. Grimes 		return 255;
22465fe0c237SAneesh Kumar K.V 	}
224791828a40SDavid M. Grimes 
22481d3382cbSAl Viro 	if (inode_unhashed(inode)) {
224991828a40SDavid M. Grimes 		/* Unfortunately insert_inode_hash is not idempotent,
225091828a40SDavid M. Grimes 		 * so as we hash inodes here rather than at creation
225191828a40SDavid M. Grimes 		 * time, we need a lock to ensure we only try
225291828a40SDavid M. Grimes 		 * to do it once
225391828a40SDavid M. Grimes 		 */
225491828a40SDavid M. Grimes 		static DEFINE_SPINLOCK(lock);
225591828a40SDavid M. Grimes 		spin_lock(&lock);
22561d3382cbSAl Viro 		if (inode_unhashed(inode))
225791828a40SDavid M. Grimes 			__insert_inode_hash(inode,
225891828a40SDavid M. Grimes 					    inode->i_ino + inode->i_generation);
225991828a40SDavid M. Grimes 		spin_unlock(&lock);
226091828a40SDavid M. Grimes 	}
226191828a40SDavid M. Grimes 
226291828a40SDavid M. Grimes 	fh[0] = inode->i_generation;
226391828a40SDavid M. Grimes 	fh[1] = inode->i_ino;
226491828a40SDavid M. Grimes 	fh[2] = ((__u64)inode->i_ino) >> 32;
226591828a40SDavid M. Grimes 
226691828a40SDavid M. Grimes 	*len = 3;
226791828a40SDavid M. Grimes 	return 1;
226891828a40SDavid M. Grimes }
226991828a40SDavid M. Grimes 
227039655164SChristoph Hellwig static const struct export_operations shmem_export_ops = {
227191828a40SDavid M. Grimes 	.get_parent     = shmem_get_parent,
227291828a40SDavid M. Grimes 	.encode_fh      = shmem_encode_fh,
2273480b116cSChristoph Hellwig 	.fh_to_dentry	= shmem_fh_to_dentry,
227491828a40SDavid M. Grimes };
227591828a40SDavid M. Grimes 
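As a hedged, standalone illustration of the export hooks above, this sketch packs and unpacks the three-word file handle the same way shmem_encode_fh() and shmem_fh_to_dentry()/shmem_match() do; the inode number and generation values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fh[3];
	uint64_t ino = 0x0000000100000002ULL;	/* example inode number */
	uint32_t generation = 42;		/* example i_generation */

	/* pack, as in shmem_encode_fh() */
	fh[0] = generation;
	fh[1] = (uint32_t)ino;
	fh[2] = (uint32_t)(ino >> 32);

	/* unpack, as in shmem_fh_to_dentry()/shmem_match() */
	uint64_t inum = ((uint64_t)fh[2] << 32) | fh[1];

	printf("round-trip ok: %s\n", inum == ino ? "yes" : "no");
	return 0;
}
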
2276680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2277680d794bSakpm@linux-foundation.org 			       bool remount)
22781da177e4SLinus Torvalds {
22791da177e4SLinus Torvalds 	char *this_char, *value, *rest;
22808751e039SEric W. Biederman 	uid_t uid;
22818751e039SEric W. Biederman 	gid_t gid;
22821da177e4SLinus Torvalds 
2283b00dc3adSHugh Dickins 	while (options != NULL) {
2284b00dc3adSHugh Dickins 		this_char = options;
2285b00dc3adSHugh Dickins 		for (;;) {
2286b00dc3adSHugh Dickins 			/*
2287b00dc3adSHugh Dickins 			 * NUL-terminate this option: unfortunately,
2288b00dc3adSHugh Dickins 			 * mount options form a comma-separated list,
2289b00dc3adSHugh Dickins 			 * but mpol's nodelist may also contain commas.
2290b00dc3adSHugh Dickins 			 */
2291b00dc3adSHugh Dickins 			options = strchr(options, ',');
2292b00dc3adSHugh Dickins 			if (options == NULL)
2293b00dc3adSHugh Dickins 				break;
2294b00dc3adSHugh Dickins 			options++;
2295b00dc3adSHugh Dickins 			if (!isdigit(*options)) {
2296b00dc3adSHugh Dickins 				options[-1] = '\0';
2297b00dc3adSHugh Dickins 				break;
2298b00dc3adSHugh Dickins 			}
2299b00dc3adSHugh Dickins 		}
23001da177e4SLinus Torvalds 		if (!*this_char)
23011da177e4SLinus Torvalds 			continue;
23021da177e4SLinus Torvalds 		if ((value = strchr(this_char,'=')) != NULL) {
23031da177e4SLinus Torvalds 			*value++ = 0;
23041da177e4SLinus Torvalds 		} else {
23051da177e4SLinus Torvalds 			printk(KERN_ERR
23061da177e4SLinus Torvalds 			    "tmpfs: No value for mount option '%s'\n",
23071da177e4SLinus Torvalds 			    this_char);
23081da177e4SLinus Torvalds 			return 1;
23091da177e4SLinus Torvalds 		}
23101da177e4SLinus Torvalds 
23111da177e4SLinus Torvalds 		if (!strcmp(this_char,"size")) {
23121da177e4SLinus Torvalds 			unsigned long long size;
23131da177e4SLinus Torvalds 			size = memparse(value,&rest);
23141da177e4SLinus Torvalds 			if (*rest == '%') {
23151da177e4SLinus Torvalds 				size <<= PAGE_SHIFT;
23161da177e4SLinus Torvalds 				size *= totalram_pages;
23171da177e4SLinus Torvalds 				do_div(size, 100);
23181da177e4SLinus Torvalds 				rest++;
23191da177e4SLinus Torvalds 			}
23201da177e4SLinus Torvalds 			if (*rest)
23211da177e4SLinus Torvalds 				goto bad_val;
2322680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks =
2323680d794bSakpm@linux-foundation.org 				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
23241da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_blocks")) {
2325680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks = memparse(value, &rest);
23261da177e4SLinus Torvalds 			if (*rest)
23271da177e4SLinus Torvalds 				goto bad_val;
23281da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"nr_inodes")) {
2329680d794bSakpm@linux-foundation.org 			sbinfo->max_inodes = memparse(value, &rest);
23301da177e4SLinus Torvalds 			if (*rest)
23311da177e4SLinus Torvalds 				goto bad_val;
23321da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"mode")) {
2333680d794bSakpm@linux-foundation.org 			if (remount)
23341da177e4SLinus Torvalds 				continue;
2335680d794bSakpm@linux-foundation.org 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
23361da177e4SLinus Torvalds 			if (*rest)
23371da177e4SLinus Torvalds 				goto bad_val;
23381da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"uid")) {
2339680d794bSakpm@linux-foundation.org 			if (remount)
23401da177e4SLinus Torvalds 				continue;
23418751e039SEric W. Biederman 			uid = simple_strtoul(value, &rest, 0);
23421da177e4SLinus Torvalds 			if (*rest)
23431da177e4SLinus Torvalds 				goto bad_val;
23448751e039SEric W. Biederman 			sbinfo->uid = make_kuid(current_user_ns(), uid);
23458751e039SEric W. Biederman 			if (!uid_valid(sbinfo->uid))
23468751e039SEric W. Biederman 				goto bad_val;
23471da177e4SLinus Torvalds 		} else if (!strcmp(this_char,"gid")) {
2348680d794bSakpm@linux-foundation.org 			if (remount)
23491da177e4SLinus Torvalds 				continue;
23508751e039SEric W. Biederman 			gid = simple_strtoul(value, &rest, 0);
23511da177e4SLinus Torvalds 			if (*rest)
23521da177e4SLinus Torvalds 				goto bad_val;
23538751e039SEric W. Biederman 			sbinfo->gid = make_kgid(current_user_ns(), gid);
23548751e039SEric W. Biederman 			if (!gid_valid(sbinfo->gid))
23558751e039SEric W. Biederman 				goto bad_val;
23567339ff83SRobin Holt 		} else if (!strcmp(this_char,"mpol")) {
235771fe804bSLee Schermerhorn 			if (mpol_parse_str(value, &sbinfo->mpol, 1))
23587339ff83SRobin Holt 				goto bad_val;
23591da177e4SLinus Torvalds 		} else {
23601da177e4SLinus Torvalds 			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
23611da177e4SLinus Torvalds 			       this_char);
23621da177e4SLinus Torvalds 			return 1;
23631da177e4SLinus Torvalds 		}
23641da177e4SLinus Torvalds 	}
23651da177e4SLinus Torvalds 	return 0;
23661da177e4SLinus Torvalds 
23671da177e4SLinus Torvalds bad_val:
23681da177e4SLinus Torvalds 	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
23691da177e4SLinus Torvalds 	       value, this_char);
23701da177e4SLinus Torvalds 	return 1;
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds }
23731da177e4SLinus Torvalds 
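A hedged userspace sketch of feeding shmem_parse_options() a typical option string through mount(2); the /mnt/tmp mount point is an assumption and the call needs CAP_SYS_ADMIN. "size" takes k/m/g suffixes or a trailing '%' of total RAM, and a comma inside an mpol nodelist survives because the loop above only splits on a comma that is not followed by a digit.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* size as a percentage of RAM, an inode cap, and a sticky world-writable mode */
	const char *opts = "size=50%,nr_inodes=4096,mode=1777";

	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
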
23741da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
23751da177e4SLinus Torvalds {
23761da177e4SLinus Torvalds 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2377680d794bSakpm@linux-foundation.org 	struct shmem_sb_info config = *sbinfo;
23780edd73b3SHugh Dickins 	unsigned long inodes;
23790edd73b3SHugh Dickins 	int error = -EINVAL;
23801da177e4SLinus Torvalds 
2381680d794bSakpm@linux-foundation.org 	if (shmem_parse_options(data, &config, true))
23820edd73b3SHugh Dickins 		return error;
23830edd73b3SHugh Dickins 
23840edd73b3SHugh Dickins 	spin_lock(&sbinfo->stat_lock);
23850edd73b3SHugh Dickins 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
23867e496299STim Chen 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
23870edd73b3SHugh Dickins 		goto out;
2388680d794bSakpm@linux-foundation.org 	if (config.max_inodes < inodes)
23890edd73b3SHugh Dickins 		goto out;
23900edd73b3SHugh Dickins 	/*
239154af6042SHugh Dickins 	 * Those tests disallow limited->unlimited while any blocks or inodes are in use;
23920edd73b3SHugh Dickins 	 * but we must separately disallow unlimited->limited, because
23930edd73b3SHugh Dickins 	 * in that case we have no record of how much is already in use.
23940edd73b3SHugh Dickins 	 */
2395680d794bSakpm@linux-foundation.org 	if (config.max_blocks && !sbinfo->max_blocks)
23960edd73b3SHugh Dickins 		goto out;
2397680d794bSakpm@linux-foundation.org 	if (config.max_inodes && !sbinfo->max_inodes)
23980edd73b3SHugh Dickins 		goto out;
23990edd73b3SHugh Dickins 
24000edd73b3SHugh Dickins 	error = 0;
2401680d794bSakpm@linux-foundation.org 	sbinfo->max_blocks  = config.max_blocks;
2402680d794bSakpm@linux-foundation.org 	sbinfo->max_inodes  = config.max_inodes;
2403680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = config.max_inodes - inodes;
240471fe804bSLee Schermerhorn 
240571fe804bSLee Schermerhorn 	mpol_put(sbinfo->mpol);
240671fe804bSLee Schermerhorn 	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
24070edd73b3SHugh Dickins out:
24080edd73b3SHugh Dickins 	spin_unlock(&sbinfo->stat_lock);
24090edd73b3SHugh Dickins 	return error;
24101da177e4SLinus Torvalds }
2411680d794bSakpm@linux-foundation.org 
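Remounting, sketched below under the same assumptions as the mount example, ends up in shmem_remount_fs(); per the checks above, shrinking below current usage, switching to unlimited while blocks are in use, or adding a limit to an instance mounted without one all fail with EINVAL.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* grow the existing limits; "size=0" (unlimited) would be refused while blocks are in use */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", MS_REMOUNT, "size=2g,nr_inodes=8192") != 0) {
		perror("remount");
		return 1;
	}
	return 0;
}
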
241234c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2413680d794bSakpm@linux-foundation.org {
241434c80b1dSAl Viro 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2415680d794bSakpm@linux-foundation.org 
2416680d794bSakpm@linux-foundation.org 	if (sbinfo->max_blocks != shmem_default_max_blocks())
2417680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",size=%luk",
2418680d794bSakpm@linux-foundation.org 			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2419680d794bSakpm@linux-foundation.org 	if (sbinfo->max_inodes != shmem_default_max_inodes())
2420680d794bSakpm@linux-foundation.org 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2421680d794bSakpm@linux-foundation.org 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
242209208d15SAl Viro 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
24238751e039SEric W. Biederman 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
24248751e039SEric W. Biederman 		seq_printf(seq, ",uid=%u",
24258751e039SEric W. Biederman 				from_kuid_munged(&init_user_ns, sbinfo->uid));
24268751e039SEric W. Biederman 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
24278751e039SEric W. Biederman 		seq_printf(seq, ",gid=%u",
24288751e039SEric W. Biederman 				from_kgid_munged(&init_user_ns, sbinfo->gid));
242971fe804bSLee Schermerhorn 	shmem_show_mpol(seq, sbinfo->mpol);
2430680d794bSakpm@linux-foundation.org 	return 0;
2431680d794bSakpm@linux-foundation.org }
2432680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */
24331da177e4SLinus Torvalds 
24341da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb)
24351da177e4SLinus Torvalds {
2436602586a8SHugh Dickins 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2437602586a8SHugh Dickins 
2438602586a8SHugh Dickins 	percpu_counter_destroy(&sbinfo->used_blocks);
2439602586a8SHugh Dickins 	kfree(sbinfo);
24401da177e4SLinus Torvalds 	sb->s_fs_info = NULL;
24411da177e4SLinus Torvalds }
24421da177e4SLinus Torvalds 
24432b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent)
24441da177e4SLinus Torvalds {
24451da177e4SLinus Torvalds 	struct inode *inode;
24460edd73b3SHugh Dickins 	struct shmem_sb_info *sbinfo;
2447680d794bSakpm@linux-foundation.org 	int err = -ENOMEM;
2448680d794bSakpm@linux-foundation.org 
2449680d794bSakpm@linux-foundation.org 	/* Round up to L1_CACHE_BYTES to resist false sharing */
2450425fbf04SPekka Enberg 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2451680d794bSakpm@linux-foundation.org 				L1_CACHE_BYTES), GFP_KERNEL);
2452680d794bSakpm@linux-foundation.org 	if (!sbinfo)
2453680d794bSakpm@linux-foundation.org 		return -ENOMEM;
2454680d794bSakpm@linux-foundation.org 
2455680d794bSakpm@linux-foundation.org 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
245676aac0e9SDavid Howells 	sbinfo->uid = current_fsuid();
245776aac0e9SDavid Howells 	sbinfo->gid = current_fsgid();
2458680d794bSakpm@linux-foundation.org 	sb->s_fs_info = sbinfo;
24591da177e4SLinus Torvalds 
24600edd73b3SHugh Dickins #ifdef CONFIG_TMPFS
24611da177e4SLinus Torvalds 	/*
24621da177e4SLinus Torvalds 	 * By default we only allow half of the physical RAM per
24631da177e4SLinus Torvalds 	 * tmpfs instance, limiting inodes to one per page of lowmem;
24641da177e4SLinus Torvalds 	 * but the internal instance is left unlimited.
24651da177e4SLinus Torvalds 	 */
24661da177e4SLinus Torvalds 	if (!(sb->s_flags & MS_NOUSER)) {
2467680d794bSakpm@linux-foundation.org 		sbinfo->max_blocks = shmem_default_max_blocks();
2468680d794bSakpm@linux-foundation.org 		sbinfo->max_inodes = shmem_default_max_inodes();
2469680d794bSakpm@linux-foundation.org 		if (shmem_parse_options(data, sbinfo, false)) {
2470680d794bSakpm@linux-foundation.org 			err = -EINVAL;
2471680d794bSakpm@linux-foundation.org 			goto failed;
2472680d794bSakpm@linux-foundation.org 		}
24731da177e4SLinus Torvalds 	}
247491828a40SDavid M. Grimes 	sb->s_export_op = &shmem_export_ops;
24752f6e38f3SHugh Dickins 	sb->s_flags |= MS_NOSEC;
24760edd73b3SHugh Dickins #else
24770edd73b3SHugh Dickins 	sb->s_flags |= MS_NOUSER;
24780edd73b3SHugh Dickins #endif
24791da177e4SLinus Torvalds 
24801da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
2481602586a8SHugh Dickins 	if (percpu_counter_init(&sbinfo->used_blocks, 0))
2482602586a8SHugh Dickins 		goto failed;
2483680d794bSakpm@linux-foundation.org 	sbinfo->free_inodes = sbinfo->max_inodes;
24841da177e4SLinus Torvalds 
2485285b2c4fSHugh Dickins 	sb->s_maxbytes = MAX_LFS_FILESIZE;
24861da177e4SLinus Torvalds 	sb->s_blocksize = PAGE_CACHE_SIZE;
24871da177e4SLinus Torvalds 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
24881da177e4SLinus Torvalds 	sb->s_magic = TMPFS_MAGIC;
24891da177e4SLinus Torvalds 	sb->s_op = &shmem_ops;
2490cfd95a9cSRobin H. Johnson 	sb->s_time_gran = 1;
2491b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
249239f0247dSAndreas Gruenbacher 	sb->s_xattr = shmem_xattr_handlers;
2493b09e0fa4SEric Paris #endif
2494b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL
249539f0247dSAndreas Gruenbacher 	sb->s_flags |= MS_POSIXACL;
249639f0247dSAndreas Gruenbacher #endif
24970edd73b3SHugh Dickins 
2498454abafeSDmitry Monakhov 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
24991da177e4SLinus Torvalds 	if (!inode)
25001da177e4SLinus Torvalds 		goto failed;
2501680d794bSakpm@linux-foundation.org 	inode->i_uid = sbinfo->uid;
2502680d794bSakpm@linux-foundation.org 	inode->i_gid = sbinfo->gid;
2503318ceed0SAl Viro 	sb->s_root = d_make_root(inode);
2504318ceed0SAl Viro 	if (!sb->s_root)
250548fde701SAl Viro 		goto failed;
25061da177e4SLinus Torvalds 	return 0;
25071da177e4SLinus Torvalds 
25081da177e4SLinus Torvalds failed:
25091da177e4SLinus Torvalds 	shmem_put_super(sb);
25101da177e4SLinus Torvalds 	return err;
25111da177e4SLinus Torvalds }
25121da177e4SLinus Torvalds 
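The defaults chosen here (half of RAM for blocks, one inode per page of lowmem, mode 1777) are visible from userspace; a hedged sketch using statvfs(3), assuming /dev/shm is a tmpfs instance mounted with those defaults:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
	struct statvfs st;

	if (statvfs("/dev/shm", &st) != 0) {
		perror("statvfs");
		return 1;
	}
	/* f_blocks reflects sbinfo->max_blocks, f_files reflects sbinfo->max_inodes */
	printf("size: %llu MiB, inodes: %llu\n",
	       (unsigned long long)st.f_blocks * st.f_bsize >> 20,
	       (unsigned long long)st.f_files);
	return 0;
}
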
2513fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep;
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb)
25161da177e4SLinus Torvalds {
251741ffe5d5SHugh Dickins 	struct shmem_inode_info *info;
251841ffe5d5SHugh Dickins 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
251941ffe5d5SHugh Dickins 	if (!info)
25201da177e4SLinus Torvalds 		return NULL;
252141ffe5d5SHugh Dickins 	return &info->vfs_inode;
25221da177e4SLinus Torvalds }
25231da177e4SLinus Torvalds 
252441ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head)
2525fa0d7e3dSNick Piggin {
2526fa0d7e3dSNick Piggin 	struct inode *inode = container_of(head, struct inode, i_rcu);
2527fa0d7e3dSNick Piggin 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2528fa0d7e3dSNick Piggin }
2529fa0d7e3dSNick Piggin 
25301da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode)
25311da177e4SLinus Torvalds {
253209208d15SAl Viro 	if (S_ISREG(inode->i_mode))
25331da177e4SLinus Torvalds 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
253441ffe5d5SHugh Dickins 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
25351da177e4SLinus Torvalds }
25361da177e4SLinus Torvalds 
253741ffe5d5SHugh Dickins static void shmem_init_inode(void *foo)
25381da177e4SLinus Torvalds {
253941ffe5d5SHugh Dickins 	struct shmem_inode_info *info = foo;
254041ffe5d5SHugh Dickins 	inode_init_once(&info->vfs_inode);
25411da177e4SLinus Torvalds }
25421da177e4SLinus Torvalds 
254341ffe5d5SHugh Dickins static int shmem_init_inodecache(void)
25441da177e4SLinus Torvalds {
25451da177e4SLinus Torvalds 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
25461da177e4SLinus Torvalds 				sizeof(struct shmem_inode_info),
254741ffe5d5SHugh Dickins 				0, SLAB_PANIC, shmem_init_inode);
25481da177e4SLinus Torvalds 	return 0;
25491da177e4SLinus Torvalds }
25501da177e4SLinus Torvalds 
255141ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void)
25521da177e4SLinus Torvalds {
25531a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(shmem_inode_cachep);
25541da177e4SLinus Torvalds }
25551da177e4SLinus Torvalds 
2556f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = {
25571da177e4SLinus Torvalds 	.writepage	= shmem_writepage,
255876719325SKen Chen 	.set_page_dirty	= __set_page_dirty_no_writeback,
25591da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
2560800d15a5SNick Piggin 	.write_begin	= shmem_write_begin,
2561800d15a5SNick Piggin 	.write_end	= shmem_write_end,
25621da177e4SLinus Torvalds #endif
2563304dbdb7SLee Schermerhorn 	.migratepage	= migrate_page,
2564aa261f54SAndi Kleen 	.error_remove_page = generic_error_remove_page,
25651da177e4SLinus Torvalds };
25661da177e4SLinus Torvalds 
256715ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = {
25681da177e4SLinus Torvalds 	.mmap		= shmem_mmap,
25691da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
2570f21f8062SHugh Dickins 	.llseek		= generic_file_llseek,
2571bcd78e49SHugh Dickins 	.read		= do_sync_read,
25725402b976SHugh Dickins 	.write		= do_sync_write,
2573bcd78e49SHugh Dickins 	.aio_read	= shmem_file_aio_read,
25745402b976SHugh Dickins 	.aio_write	= generic_file_aio_write,
25751b061d92SChristoph Hellwig 	.fsync		= noop_fsync,
2576708e3508SHugh Dickins 	.splice_read	= shmem_file_splice_read,
2577ae976416SHugh Dickins 	.splice_write	= generic_file_splice_write,
257883e4fa9cSHugh Dickins 	.fallocate	= shmem_fallocate,
25791da177e4SLinus Torvalds #endif
25801da177e4SLinus Torvalds };
25811da177e4SLinus Torvalds 
258292e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = {
258394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
2584b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2585b09e0fa4SEric Paris 	.setxattr	= shmem_setxattr,
2586b09e0fa4SEric Paris 	.getxattr	= shmem_getxattr,
2587b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
2588b09e0fa4SEric Paris 	.removexattr	= shmem_removexattr,
2589b09e0fa4SEric Paris #endif
25901da177e4SLinus Torvalds };
25911da177e4SLinus Torvalds 
259292e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = {
25931da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
25941da177e4SLinus Torvalds 	.create		= shmem_create,
25951da177e4SLinus Torvalds 	.lookup		= simple_lookup,
25961da177e4SLinus Torvalds 	.link		= shmem_link,
25971da177e4SLinus Torvalds 	.unlink		= shmem_unlink,
25981da177e4SLinus Torvalds 	.symlink	= shmem_symlink,
25991da177e4SLinus Torvalds 	.mkdir		= shmem_mkdir,
26001da177e4SLinus Torvalds 	.rmdir		= shmem_rmdir,
26011da177e4SLinus Torvalds 	.mknod		= shmem_mknod,
26021da177e4SLinus Torvalds 	.rename		= shmem_rename,
26031da177e4SLinus Torvalds #endif
2604b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2605b09e0fa4SEric Paris 	.setxattr	= shmem_setxattr,
2606b09e0fa4SEric Paris 	.getxattr	= shmem_getxattr,
2607b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
2608b09e0fa4SEric Paris 	.removexattr	= shmem_removexattr,
2609b09e0fa4SEric Paris #endif
261039f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
261194c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
261239f0247dSAndreas Gruenbacher #endif
261339f0247dSAndreas Gruenbacher };
261439f0247dSAndreas Gruenbacher 
261592e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = {
2616b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR
2617b09e0fa4SEric Paris 	.setxattr	= shmem_setxattr,
2618b09e0fa4SEric Paris 	.getxattr	= shmem_getxattr,
2619b09e0fa4SEric Paris 	.listxattr	= shmem_listxattr,
2620b09e0fa4SEric Paris 	.removexattr	= shmem_removexattr,
2621b09e0fa4SEric Paris #endif
262239f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL
262394c1e62dSHugh Dickins 	.setattr	= shmem_setattr,
262439f0247dSAndreas Gruenbacher #endif
26251da177e4SLinus Torvalds };
26261da177e4SLinus Torvalds 
2627759b9775SHugh Dickins static const struct super_operations shmem_ops = {
26281da177e4SLinus Torvalds 	.alloc_inode	= shmem_alloc_inode,
26291da177e4SLinus Torvalds 	.destroy_inode	= shmem_destroy_inode,
26301da177e4SLinus Torvalds #ifdef CONFIG_TMPFS
26311da177e4SLinus Torvalds 	.statfs		= shmem_statfs,
26321da177e4SLinus Torvalds 	.remount_fs	= shmem_remount_fs,
2633680d794bSakpm@linux-foundation.org 	.show_options	= shmem_show_options,
26341da177e4SLinus Torvalds #endif
26351f895f75SAl Viro 	.evict_inode	= shmem_evict_inode,
26361da177e4SLinus Torvalds 	.drop_inode	= generic_delete_inode,
26371da177e4SLinus Torvalds 	.put_super	= shmem_put_super,
26381da177e4SLinus Torvalds };
26391da177e4SLinus Torvalds 
2640f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = {
264154cb8821SNick Piggin 	.fault		= shmem_fault,
26421da177e4SLinus Torvalds #ifdef CONFIG_NUMA
26431da177e4SLinus Torvalds 	.set_policy     = shmem_set_policy,
26441da177e4SLinus Torvalds 	.get_policy     = shmem_get_policy,
26451da177e4SLinus Torvalds #endif
26461da177e4SLinus Torvalds };
26471da177e4SLinus Torvalds 
26483c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type,
26493c26ff6eSAl Viro 	int flags, const char *dev_name, void *data)
26501da177e4SLinus Torvalds {
26513c26ff6eSAl Viro 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
26521da177e4SLinus Torvalds }
26531da177e4SLinus Torvalds 
265441ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
26551da177e4SLinus Torvalds 	.owner		= THIS_MODULE,
26561da177e4SLinus Torvalds 	.name		= "tmpfs",
26573c26ff6eSAl Viro 	.mount		= shmem_mount,
26581da177e4SLinus Torvalds 	.kill_sb	= kill_litter_super,
26591da177e4SLinus Torvalds };
26601da177e4SLinus Torvalds 
266141ffe5d5SHugh Dickins int __init shmem_init(void)
26621da177e4SLinus Torvalds {
26631da177e4SLinus Torvalds 	int error;
26641da177e4SLinus Torvalds 
2665e0bf68ddSPeter Zijlstra 	error = bdi_init(&shmem_backing_dev_info);
2666e0bf68ddSPeter Zijlstra 	if (error)
2667e0bf68ddSPeter Zijlstra 		goto out4;
2668e0bf68ddSPeter Zijlstra 
266941ffe5d5SHugh Dickins 	error = shmem_init_inodecache();
26701da177e4SLinus Torvalds 	if (error)
26711da177e4SLinus Torvalds 		goto out3;
26721da177e4SLinus Torvalds 
267341ffe5d5SHugh Dickins 	error = register_filesystem(&shmem_fs_type);
26741da177e4SLinus Torvalds 	if (error) {
26751da177e4SLinus Torvalds 		printk(KERN_ERR "Could not register tmpfs\n");
26761da177e4SLinus Torvalds 		goto out2;
26771da177e4SLinus Torvalds 	}
267895dc112aSGreg Kroah-Hartman 
267941ffe5d5SHugh Dickins 	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
268041ffe5d5SHugh Dickins 				 shmem_fs_type.name, NULL);
26811da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt)) {
26821da177e4SLinus Torvalds 		error = PTR_ERR(shm_mnt);
26831da177e4SLinus Torvalds 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
26841da177e4SLinus Torvalds 		goto out1;
26851da177e4SLinus Torvalds 	}
26861da177e4SLinus Torvalds 	return 0;
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds out1:
268941ffe5d5SHugh Dickins 	unregister_filesystem(&shmem_fs_type);
26901da177e4SLinus Torvalds out2:
269141ffe5d5SHugh Dickins 	shmem_destroy_inodecache();
26921da177e4SLinus Torvalds out3:
2693e0bf68ddSPeter Zijlstra 	bdi_destroy(&shmem_backing_dev_info);
2694e0bf68ddSPeter Zijlstra out4:
26951da177e4SLinus Torvalds 	shm_mnt = ERR_PTR(error);
26961da177e4SLinus Torvalds 	return error;
26971da177e4SLinus Torvalds }
2698853ac43aSMatt Mackall 
2699853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
2700853ac43aSMatt Mackall 
2701853ac43aSMatt Mackall /*
2702853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2703853ac43aSMatt Mackall  *
2704853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
2705853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
2706853ac43aSMatt Mackall  * its complexity. On systems without swap this code should be
2707853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
2708853ac43aSMatt Mackall  */
2709853ac43aSMatt Mackall 
2710853ac43aSMatt Mackall #include <linux/ramfs.h>
2711853ac43aSMatt Mackall 
271241ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
2713853ac43aSMatt Mackall 	.name		= "tmpfs",
27143c26ff6eSAl Viro 	.mount		= ramfs_mount,
2715853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
2716853ac43aSMatt Mackall };
2717853ac43aSMatt Mackall 
271841ffe5d5SHugh Dickins int __init shmem_init(void)
2719853ac43aSMatt Mackall {
272041ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2721853ac43aSMatt Mackall 
272241ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
2723853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
2724853ac43aSMatt Mackall 
2725853ac43aSMatt Mackall 	return 0;
2726853ac43aSMatt Mackall }
2727853ac43aSMatt Mackall 
272841ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
2729853ac43aSMatt Mackall {
2730853ac43aSMatt Mackall 	return 0;
2731853ac43aSMatt Mackall }
2732853ac43aSMatt Mackall 
27333f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
27343f96b79aSHugh Dickins {
27353f96b79aSHugh Dickins 	return 0;
27363f96b79aSHugh Dickins }
27373f96b79aSHugh Dickins 
273824513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
273924513264SHugh Dickins {
274024513264SHugh Dickins }
274124513264SHugh Dickins 
274241ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
274394c1e62dSHugh Dickins {
274441ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
274594c1e62dSHugh Dickins }
274694c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
274794c1e62dSHugh Dickins 
2748853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
27490b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
2750454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
27510b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
27520b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
2753853ac43aSMatt Mackall 
2754853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
2755853ac43aSMatt Mackall 
2756853ac43aSMatt Mackall /* common code */
27571da177e4SLinus Torvalds 
275846711810SRandy Dunlap /**
27591da177e4SLinus Torvalds  * shmem_file_setup - get an unlinked file living in tmpfs
27601da177e4SLinus Torvalds  * @name: name for dentry (to be seen in /proc/<pid>/maps)
27611da177e4SLinus Torvalds  * @size: size to be set for the file
27620b0a0806SHugh Dickins  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
27631da177e4SLinus Torvalds  */
2764168f5ac6SSergei Trofimovich struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds 	int error;
27671da177e4SLinus Torvalds 	struct file *file;
27681da177e4SLinus Torvalds 	struct inode *inode;
27692c48b9c4SAl Viro 	struct path path;
27702c48b9c4SAl Viro 	struct dentry *root;
27711da177e4SLinus Torvalds 	struct qstr this;
27721da177e4SLinus Torvalds 
27731da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt))
27741da177e4SLinus Torvalds 		return (void *)shm_mnt;
27751da177e4SLinus Torvalds 
2776285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
27771da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
27781da177e4SLinus Torvalds 
27791da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
27801da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
27811da177e4SLinus Torvalds 
27821da177e4SLinus Torvalds 	error = -ENOMEM;
27831da177e4SLinus Torvalds 	this.name = name;
27841da177e4SLinus Torvalds 	this.len = strlen(name);
27851da177e4SLinus Torvalds 	this.hash = 0; /* will go */
27861da177e4SLinus Torvalds 	root = shm_mnt->mnt_root;
27872c48b9c4SAl Viro 	path.dentry = d_alloc(root, &this);
27882c48b9c4SAl Viro 	if (!path.dentry)
27891da177e4SLinus Torvalds 		goto put_memory;
27902c48b9c4SAl Viro 	path.mnt = mntget(shm_mnt);
27911da177e4SLinus Torvalds 
27921da177e4SLinus Torvalds 	error = -ENOSPC;
2793454abafeSDmitry Monakhov 	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
27941da177e4SLinus Torvalds 	if (!inode)
27954b42af81SAl Viro 		goto put_dentry;
27961da177e4SLinus Torvalds 
27972c48b9c4SAl Viro 	d_instantiate(path.dentry, inode);
27981da177e4SLinus Torvalds 	inode->i_size = size;
27996d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
2800853ac43aSMatt Mackall #ifndef CONFIG_MMU
2801853ac43aSMatt Mackall 	error = ramfs_nommu_expand_for_mapping(inode, size);
2802853ac43aSMatt Mackall 	if (error)
28034b42af81SAl Viro 		goto put_dentry;
2804853ac43aSMatt Mackall #endif
28054b42af81SAl Viro 
28064b42af81SAl Viro 	error = -ENFILE;
28072c48b9c4SAl Viro 	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
28084b42af81SAl Viro 		  &shmem_file_operations);
28094b42af81SAl Viro 	if (!file)
28104b42af81SAl Viro 		goto put_dentry;
28114b42af81SAl Viro 
28121da177e4SLinus Torvalds 	return file;
28131da177e4SLinus Torvalds 
28141da177e4SLinus Torvalds put_dentry:
28152c48b9c4SAl Viro 	path_put(&path);
28161da177e4SLinus Torvalds put_memory:
28171da177e4SLinus Torvalds 	shmem_unacct_size(flags, size);
28181da177e4SLinus Torvalds 	return ERR_PTR(error);
28191da177e4SLinus Torvalds }
2820395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
28211da177e4SLinus Torvalds 
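A hedged sketch of the in-kernel call pattern for shmem_file_setup(); the buffer name and size are illustrative and error handling is minimal:

	struct file *filp;

	/* unlinked tmpfs file, 16 pages; pre-accounted unless VM_NORESERVE is passed in flags */
	filp = shmem_file_setup("demo-buffer", 16 * PAGE_SIZE, 0);
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	/* ... read/write/mmap through filp ... */
	fput(filp);
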
282246711810SRandy Dunlap /**
28231da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
28241da177e4SLinus Torvalds  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
28251da177e4SLinus Torvalds  */
28261da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
28271da177e4SLinus Torvalds {
28281da177e4SLinus Torvalds 	struct file *file;
28291da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
28301da177e4SLinus Torvalds 
28311da177e4SLinus Torvalds 	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
28321da177e4SLinus Torvalds 	if (IS_ERR(file))
28331da177e4SLinus Torvalds 		return PTR_ERR(file);
28341da177e4SLinus Torvalds 
28351da177e4SLinus Torvalds 	if (vma->vm_file)
28361da177e4SLinus Torvalds 		fput(vma->vm_file);
28371da177e4SLinus Torvalds 	vma->vm_file = file;
28381da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
2839bee4c36aSHugh Dickins 	vma->vm_flags |= VM_CAN_NONLINEAR;
28401da177e4SLinus Torvalds 	return 0;
28411da177e4SLinus Torvalds }
2842d9d90e5eSHugh Dickins 
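shmem_zero_setup() is the path taken (on CONFIG_SHMEM/CONFIG_MMU kernels) when userspace asks for a shared anonymous mapping; a hedged, self-contained sketch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* a shared anonymous mapping is backed by an unlinked "dev/zero" tmpfs file */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");	/* visible to children across fork() */
	munmap(p, 4096);
	return 0;
}
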
2843d9d90e5eSHugh Dickins /**
2844d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2845d9d90e5eSHugh Dickins  * @mapping:	the page's address_space
2846d9d90e5eSHugh Dickins  * @index:	the page index
2847d9d90e5eSHugh Dickins  * @gfp:	the page allocator flags to use if allocating
2848d9d90e5eSHugh Dickins  *
2849d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2850d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
2851d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method, which does not
2852d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2853d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2854d9d90e5eSHugh Dickins  *
285568da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
285668da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2857d9d90e5eSHugh Dickins  */
2858d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2859d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
2860d9d90e5eSHugh Dickins {
286168da9f05SHugh Dickins #ifdef CONFIG_SHMEM
286268da9f05SHugh Dickins 	struct inode *inode = mapping->host;
28639276aad6SHugh Dickins 	struct page *page;
286468da9f05SHugh Dickins 	int error;
286568da9f05SHugh Dickins 
286668da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
286768da9f05SHugh Dickins 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
286868da9f05SHugh Dickins 	if (error)
286968da9f05SHugh Dickins 		page = ERR_PTR(error);
287068da9f05SHugh Dickins 	else
287168da9f05SHugh Dickins 		unlock_page(page);
287268da9f05SHugh Dickins 	return page;
287368da9f05SHugh Dickins #else
287468da9f05SHugh Dickins 	/*
287568da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
287668da9f05SHugh Dickins 	 */
2877d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
287868da9f05SHugh Dickins #endif
2879d9d90e5eSHugh Dickins }
2880d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
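
A hedged in-kernel fragment of the call pattern the comment above describes for drivers such as i915; mapping and index stand in for the caller's address_space and page offset:

	struct page *page;
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... use the page ... */
	page_cache_release(page);	/* drop the reference when done */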
2881