xref: /openbmc/linux/mm/shmem.c (revision 36bccb11)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/export.h>
33 #include <linux/swap.h>
34 #include <linux/aio.h>
35 
36 static struct vfsmount *shm_mnt;
37 
38 #ifdef CONFIG_SHMEM
39 /*
40  * This virtual memory filesystem is heavily based on the ramfs. It
41  * extends ramfs with the ability to use swap and to honor resource
42  * limits, which makes it a completely usable filesystem.
43  */
44 
45 #include <linux/xattr.h>
46 #include <linux/exportfs.h>
47 #include <linux/posix_acl.h>
48 #include <linux/posix_acl_xattr.h>
49 #include <linux/mman.h>
50 #include <linux/string.h>
51 #include <linux/slab.h>
52 #include <linux/backing-dev.h>
53 #include <linux/shmem_fs.h>
54 #include <linux/writeback.h>
55 #include <linux/blkdev.h>
56 #include <linux/pagevec.h>
57 #include <linux/percpu_counter.h>
58 #include <linux/falloc.h>
59 #include <linux/splice.h>
60 #include <linux/security.h>
61 #include <linux/swapops.h>
62 #include <linux/mempolicy.h>
63 #include <linux/namei.h>
64 #include <linux/ctype.h>
65 #include <linux/migrate.h>
66 #include <linux/highmem.h>
67 #include <linux/seq_file.h>
68 #include <linux/magic.h>
69 
70 #include <asm/uaccess.h>
71 #include <asm/pgtable.h>
72 
73 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
74 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
75 
76 /* Pretend that each entry is of this size in a directory's i_size */
77 #define BOGO_DIRENT_SIZE 20
78 
79 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
80 #define SHORT_SYMLINK_LEN 128
81 
82 /*
83  * shmem_fallocate and shmem_writepage communicate via inode->i_private
84  * (with i_mutex making sure that it has only one user at a time):
85  * we would prefer not to enlarge the shmem inode just for that.
86  */
87 struct shmem_falloc {
88 	pgoff_t start;		/* start of range currently being fallocated */
89 	pgoff_t next;		/* the next page offset to be fallocated */
90 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
91 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
92 };
93 
94 /* Flag allocation requirements to shmem_getpage */
95 enum sgp_type {
96 	SGP_READ,	/* don't exceed i_size, don't allocate page */
97 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
98 	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
99 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
100 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
101 };
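
/*
 * For example, read(2) of a hole uses SGP_READ: shmem_getpage() then
 * returns success with a NULL page and the caller substitutes the
 * ZERO_PAGE, while shmem_write_begin() passes SGP_WRITE so a fresh,
 * possibly !Uptodate page may be handed back for the write to fill.
 */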
102 
103 #ifdef CONFIG_TMPFS
104 static unsigned long shmem_default_max_blocks(void)
105 {
106 	return totalram_pages / 2;
107 }
108 
109 static unsigned long shmem_default_max_inodes(void)
110 {
111 	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
112 }
113 #endif
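
/*
 * Worked example for the defaults above (illustrative figures, assuming
 * 4KiB pages and no highmem): with 1GiB of RAM, totalram_pages is
 * 262144, so an unsized tmpfs mount defaults to 131072 blocks (512MiB)
 * and 131072 inodes.  Both can be overridden at mount time, e.g.
 * "mount -t tmpfs -o size=2G,nr_inodes=100k tmpfs /mnt".
 */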
114 
115 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
116 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
117 				struct shmem_inode_info *info, pgoff_t index);
118 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
119 	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
120 
121 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
122 	struct page **pagep, enum sgp_type sgp, int *fault_type)
123 {
124 	return shmem_getpage_gfp(inode, index, pagep, sgp,
125 			mapping_gfp_mask(inode->i_mapping), fault_type);
126 }
127 
128 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
129 {
130 	return sb->s_fs_info;
131 }
132 
133 /*
134  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
135  * for shared memory and for shared anonymous (/dev/zero) mappings
136  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
137  * consistent with the pre-accounting of private mappings ...
138  */
139 static inline int shmem_acct_size(unsigned long flags, loff_t size)
140 {
141 	return (flags & VM_NORESERVE) ?
142 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
143 }
144 
145 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
146 {
147 	if (!(flags & VM_NORESERVE))
148 		vm_unacct_memory(VM_ACCT(size));
149 }
150 
151 /*
152  * ... whereas tmpfs objects are accounted incrementally as
153  * pages are allocated, in order to allow huge sparse files.
154  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
155  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
156  */
157 static inline int shmem_acct_block(unsigned long flags)
158 {
159 	return (flags & VM_NORESERVE) ?
160 		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
161 }
162 
163 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
164 {
165 	if (flags & VM_NORESERVE)
166 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
167 }
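
/*
 * Illustrative arithmetic for the two schemes above (assuming 4KiB
 * pages, PAGE_SHIFT == 12): VM_ACCT(5000) == PAGE_CACHE_ALIGN(5000) >>
 * PAGE_SHIFT == 8192 >> 12 == 2, so shmem_acct_size() charges two pages
 * up front for a 5000-byte shared memory object, whereas a VM_NORESERVE
 * (tmpfs) object of the same size is charged one page at a time by
 * shmem_acct_block(), only as pages are actually allocated.
 */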
168 
169 static const struct super_operations shmem_ops;
170 static const struct address_space_operations shmem_aops;
171 static const struct file_operations shmem_file_operations;
172 static const struct inode_operations shmem_inode_operations;
173 static const struct inode_operations shmem_dir_inode_operations;
174 static const struct inode_operations shmem_special_inode_operations;
175 static const struct vm_operations_struct shmem_vm_ops;
176 
177 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
178 	.ra_pages	= 0,	/* No readahead */
179 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
180 };
181 
182 static LIST_HEAD(shmem_swaplist);
183 static DEFINE_MUTEX(shmem_swaplist_mutex);
184 
185 static int shmem_reserve_inode(struct super_block *sb)
186 {
187 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
188 	if (sbinfo->max_inodes) {
189 		spin_lock(&sbinfo->stat_lock);
190 		if (!sbinfo->free_inodes) {
191 			spin_unlock(&sbinfo->stat_lock);
192 			return -ENOSPC;
193 		}
194 		sbinfo->free_inodes--;
195 		spin_unlock(&sbinfo->stat_lock);
196 	}
197 	return 0;
198 }
199 
200 static void shmem_free_inode(struct super_block *sb)
201 {
202 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
203 	if (sbinfo->max_inodes) {
204 		spin_lock(&sbinfo->stat_lock);
205 		sbinfo->free_inodes++;
206 		spin_unlock(&sbinfo->stat_lock);
207 	}
208 }
209 
210 /**
211  * shmem_recalc_inode - recalculate the block usage of an inode
212  * @inode: inode to recalc
213  *
214  * We have to calculate the free blocks since the mm can drop
215  * undirtied hole pages behind our back.
216  *
217  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
218  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
219  *
220  * It has to be called with the spinlock held.
221  */
222 static void shmem_recalc_inode(struct inode *inode)
223 {
224 	struct shmem_inode_info *info = SHMEM_I(inode);
225 	long freed;
226 
227 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
228 	if (freed > 0) {
229 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
230 		if (sbinfo->max_blocks)
231 			percpu_counter_add(&sbinfo->used_blocks, -freed);
232 		info->alloced -= freed;
233 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
234 		shmem_unacct_blocks(info->flags, freed);
235 	}
236 }
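
/*
 * Worked example for the calculation above (values illustrative only):
 * with info->alloced == 10, info->swapped == 3 and nrpages == 5, freed
 * is 10 - 3 - 5 == 2: the mm dropped two undirtied hole pages behind
 * our back, so two blocks are returned to the superblock's used_blocks
 * count and shmem_unacct_blocks() drops the matching memory commitment.
 */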
237 
238 /*
239  * Replace item expected in radix tree by a new item, while holding tree lock.
240  */
241 static int shmem_radix_tree_replace(struct address_space *mapping,
242 			pgoff_t index, void *expected, void *replacement)
243 {
244 	void **pslot;
245 	void *item;
246 
247 	VM_BUG_ON(!expected);
248 	VM_BUG_ON(!replacement);
249 	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
250 	if (!pslot)
251 		return -ENOENT;
252 	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
253 	if (item != expected)
254 		return -ENOENT;
255 	radix_tree_replace_slot(pslot, replacement);
256 	return 0;
257 }
258 
259 /*
260  * Sometimes, before we decide whether to proceed or to fail, we must check
261  * that an entry was not already brought back from swap by a racing thread.
262  *
263  * Checking page is not enough: by the time a SwapCache page is locked, it
264  * might be reused, and again be SwapCache, using the same swap as before.
265  */
266 static bool shmem_confirm_swap(struct address_space *mapping,
267 			       pgoff_t index, swp_entry_t swap)
268 {
269 	void *item;
270 
271 	rcu_read_lock();
272 	item = radix_tree_lookup(&mapping->page_tree, index);
273 	rcu_read_unlock();
274 	return item == swp_to_radix_entry(swap);
275 }
276 
277 /*
278  * Like add_to_page_cache_locked, but error if expected item has gone.
279  */
280 static int shmem_add_to_page_cache(struct page *page,
281 				   struct address_space *mapping,
282 				   pgoff_t index, gfp_t gfp, void *expected)
283 {
284 	int error;
285 
286 	VM_BUG_ON_PAGE(!PageLocked(page), page);
287 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
288 
289 	page_cache_get(page);
290 	page->mapping = mapping;
291 	page->index = index;
292 
293 	spin_lock_irq(&mapping->tree_lock);
294 	if (!expected)
295 		error = radix_tree_insert(&mapping->page_tree, index, page);
296 	else
297 		error = shmem_radix_tree_replace(mapping, index, expected,
298 								 page);
299 	if (!error) {
300 		mapping->nrpages++;
301 		__inc_zone_page_state(page, NR_FILE_PAGES);
302 		__inc_zone_page_state(page, NR_SHMEM);
303 		spin_unlock_irq(&mapping->tree_lock);
304 	} else {
305 		page->mapping = NULL;
306 		spin_unlock_irq(&mapping->tree_lock);
307 		page_cache_release(page);
308 	}
309 	return error;
310 }
311 
312 /*
313  * Like delete_from_page_cache, but substitutes swap for page.
314  */
315 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
316 {
317 	struct address_space *mapping = page->mapping;
318 	int error;
319 
320 	spin_lock_irq(&mapping->tree_lock);
321 	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
322 	page->mapping = NULL;
323 	mapping->nrpages--;
324 	__dec_zone_page_state(page, NR_FILE_PAGES);
325 	__dec_zone_page_state(page, NR_SHMEM);
326 	spin_unlock_irq(&mapping->tree_lock);
327 	page_cache_release(page);
328 	BUG_ON(error);
329 }
330 
331 /*
332  * Remove swap entry from radix tree, free the swap and its page cache.
333  */
334 static int shmem_free_swap(struct address_space *mapping,
335 			   pgoff_t index, void *radswap)
336 {
337 	void *old;
338 
339 	spin_lock_irq(&mapping->tree_lock);
340 	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
341 	spin_unlock_irq(&mapping->tree_lock);
342 	if (old != radswap)
343 		return -ENOENT;
344 	free_swap_and_cache(radix_to_swp_entry(radswap));
345 	return 0;
346 }
347 
348 /*
349  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
350  */
351 void shmem_unlock_mapping(struct address_space *mapping)
352 {
353 	struct pagevec pvec;
354 	pgoff_t indices[PAGEVEC_SIZE];
355 	pgoff_t index = 0;
356 
357 	pagevec_init(&pvec, 0);
358 	/*
359 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
360 	 */
361 	while (!mapping_unevictable(mapping)) {
362 		/*
363 		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
364 		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
365 		 */
366 		pvec.nr = find_get_entries(mapping, index,
367 					   PAGEVEC_SIZE, pvec.pages, indices);
368 		if (!pvec.nr)
369 			break;
370 		index = indices[pvec.nr - 1] + 1;
371 		pagevec_remove_exceptionals(&pvec);
372 		check_move_unevictable_pages(pvec.pages, pvec.nr);
373 		pagevec_release(&pvec);
374 		cond_resched();
375 	}
376 }
377 
378 /*
379  * Remove range of pages and swap entries from radix tree, and free them.
380  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
381  */
382 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
383 								 bool unfalloc)
384 {
385 	struct address_space *mapping = inode->i_mapping;
386 	struct shmem_inode_info *info = SHMEM_I(inode);
387 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
388 	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
389 	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
390 	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
391 	struct pagevec pvec;
392 	pgoff_t indices[PAGEVEC_SIZE];
393 	long nr_swaps_freed = 0;
394 	pgoff_t index;
395 	int i;
396 
397 	if (lend == -1)
398 		end = -1;	/* unsigned, so actually very big */
399 
400 	pagevec_init(&pvec, 0);
401 	index = start;
402 	while (index < end) {
403 		pvec.nr = find_get_entries(mapping, index,
404 			min(end - index, (pgoff_t)PAGEVEC_SIZE),
405 			pvec.pages, indices);
406 		if (!pvec.nr)
407 			break;
408 		mem_cgroup_uncharge_start();
409 		for (i = 0; i < pagevec_count(&pvec); i++) {
410 			struct page *page = pvec.pages[i];
411 
412 			index = indices[i];
413 			if (index >= end)
414 				break;
415 
416 			if (radix_tree_exceptional_entry(page)) {
417 				if (unfalloc)
418 					continue;
419 				nr_swaps_freed += !shmem_free_swap(mapping,
420 								index, page);
421 				continue;
422 			}
423 
424 			if (!trylock_page(page))
425 				continue;
426 			if (!unfalloc || !PageUptodate(page)) {
427 				if (page->mapping == mapping) {
428 					VM_BUG_ON_PAGE(PageWriteback(page), page);
429 					truncate_inode_page(mapping, page);
430 				}
431 			}
432 			unlock_page(page);
433 		}
434 		pagevec_remove_exceptionals(&pvec);
435 		pagevec_release(&pvec);
436 		mem_cgroup_uncharge_end();
437 		cond_resched();
438 		index++;
439 	}
440 
441 	if (partial_start) {
442 		struct page *page = NULL;
443 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
444 		if (page) {
445 			unsigned int top = PAGE_CACHE_SIZE;
446 			if (start > end) {
447 				top = partial_end;
448 				partial_end = 0;
449 			}
450 			zero_user_segment(page, partial_start, top);
451 			set_page_dirty(page);
452 			unlock_page(page);
453 			page_cache_release(page);
454 		}
455 	}
456 	if (partial_end) {
457 		struct page *page = NULL;
458 		shmem_getpage(inode, end, &page, SGP_READ, NULL);
459 		if (page) {
460 			zero_user_segment(page, 0, partial_end);
461 			set_page_dirty(page);
462 			unlock_page(page);
463 			page_cache_release(page);
464 		}
465 	}
466 	if (start >= end)
467 		return;
468 
469 	index = start;
470 	for ( ; ; ) {
471 		cond_resched();
472 
473 		pvec.nr = find_get_entries(mapping, index,
474 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
475 				pvec.pages, indices);
476 		if (!pvec.nr) {
477 			if (index == start || unfalloc)
478 				break;
479 			index = start;
480 			continue;
481 		}
482 		if ((index == start || unfalloc) && indices[0] >= end) {
483 			pagevec_remove_exceptionals(&pvec);
484 			pagevec_release(&pvec);
485 			break;
486 		}
487 		mem_cgroup_uncharge_start();
488 		for (i = 0; i < pagevec_count(&pvec); i++) {
489 			struct page *page = pvec.pages[i];
490 
491 			index = indices[i];
492 			if (index >= end)
493 				break;
494 
495 			if (radix_tree_exceptional_entry(page)) {
496 				if (unfalloc)
497 					continue;
498 				nr_swaps_freed += !shmem_free_swap(mapping,
499 								index, page);
500 				continue;
501 			}
502 
503 			lock_page(page);
504 			if (!unfalloc || !PageUptodate(page)) {
505 				if (page->mapping == mapping) {
506 					VM_BUG_ON_PAGE(PageWriteback(page), page);
507 					truncate_inode_page(mapping, page);
508 				}
509 			}
510 			unlock_page(page);
511 		}
512 		pagevec_remove_exceptionals(&pvec);
513 		pagevec_release(&pvec);
514 		mem_cgroup_uncharge_end();
515 		index++;
516 	}
517 
518 	spin_lock(&info->lock);
519 	info->swapped -= nr_swaps_freed;
520 	shmem_recalc_inode(inode);
521 	spin_unlock(&info->lock);
522 }
523 
524 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
525 {
526 	shmem_undo_range(inode, lstart, lend, false);
527 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
528 }
529 EXPORT_SYMBOL_GPL(shmem_truncate_range);
530 
531 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
532 {
533 	struct inode *inode = dentry->d_inode;
534 	int error;
535 
536 	error = inode_change_ok(inode, attr);
537 	if (error)
538 		return error;
539 
540 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
541 		loff_t oldsize = inode->i_size;
542 		loff_t newsize = attr->ia_size;
543 
544 		if (newsize != oldsize) {
545 			i_size_write(inode, newsize);
546 			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
547 		}
548 		if (newsize < oldsize) {
549 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
550 			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
551 			shmem_truncate_range(inode, newsize, (loff_t)-1);
552 			/* unmap again to remove racily COWed private pages */
553 			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
554 		}
555 	}
556 
557 	setattr_copy(inode, attr);
558 	if (attr->ia_valid & ATTR_MODE)
559 		error = posix_acl_chmod(inode, inode->i_mode);
560 	return error;
561 }
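
/*
 * A hedged userspace sketch of the shrinking path above (a standalone
 * program, not part of this file): the second ftruncate() reduces
 * i_size, unmaps the tail, and frees everything beyond 4096 through
 * shmem_truncate_range().  The /dev/shm path assumes tmpfs is mounted
 * there.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *
 *		ftruncate(fd, 1 << 20);
 *		ftruncate(fd, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */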
562 
563 static void shmem_evict_inode(struct inode *inode)
564 {
565 	struct shmem_inode_info *info = SHMEM_I(inode);
566 
567 	if (inode->i_mapping->a_ops == &shmem_aops) {
568 		shmem_unacct_size(info->flags, inode->i_size);
569 		inode->i_size = 0;
570 		shmem_truncate_range(inode, 0, (loff_t)-1);
571 		if (!list_empty(&info->swaplist)) {
572 			mutex_lock(&shmem_swaplist_mutex);
573 			list_del_init(&info->swaplist);
574 			mutex_unlock(&shmem_swaplist_mutex);
575 		}
576 	} else
577 		kfree(info->symlink);
578 
579 	simple_xattrs_free(&info->xattrs);
580 	WARN_ON(inode->i_blocks);
581 	shmem_free_inode(inode->i_sb);
582 	clear_inode(inode);
583 }
584 
585 /*
586  * If swap found in inode, free it and move page from swapcache to filecache.
587  */
588 static int shmem_unuse_inode(struct shmem_inode_info *info,
589 			     swp_entry_t swap, struct page **pagep)
590 {
591 	struct address_space *mapping = info->vfs_inode.i_mapping;
592 	void *radswap;
593 	pgoff_t index;
594 	gfp_t gfp;
595 	int error = 0;
596 
597 	radswap = swp_to_radix_entry(swap);
598 	index = radix_tree_locate_item(&mapping->page_tree, radswap);
599 	if (index == -1)
600 		return 0;
601 
602 	/*
603 	 * Move _head_ to start search for next from here.
604 	 * But be careful: shmem_evict_inode checks list_empty without taking
605 	 * mutex, and there's an instant in list_move_tail when info->swaplist
606 	 * would appear empty, if it were the only one on shmem_swaplist.
607 	 */
608 	if (shmem_swaplist.next != &info->swaplist)
609 		list_move_tail(&shmem_swaplist, &info->swaplist);
610 
611 	gfp = mapping_gfp_mask(mapping);
612 	if (shmem_should_replace_page(*pagep, gfp)) {
613 		mutex_unlock(&shmem_swaplist_mutex);
614 		error = shmem_replace_page(pagep, gfp, info, index);
615 		mutex_lock(&shmem_swaplist_mutex);
616 		/*
617 		 * We needed to drop mutex to make that restrictive page
618 		 * allocation, but the inode might have been freed while we
619 		 * dropped it: although a racing shmem_evict_inode() cannot
620 		 * complete without emptying the radix_tree, our page lock
621 		 * on this swapcache page is not enough to prevent that -
622 		 * free_swap_and_cache() of our swap entry will only
623 		 * trylock_page(), removing swap from radix_tree whatever.
624 		 *
625 		 * We must not proceed to shmem_add_to_page_cache() if the
626 		 * inode has been freed, but of course we cannot rely on
627 		 * inode or mapping or info to check that.  However, we can
628 		 * safely check if our swap entry is still in use (and here
629 		 * it can't have got reused for another page): if it's still
630 		 * in use, then the inode cannot have been freed yet, and we
631 		 * can safely proceed (if it's no longer in use, that tells
632 		 * nothing about the inode, but we don't need to unuse swap).
633 		 */
634 		if (!page_swapcount(*pagep))
635 			error = -ENOENT;
636 	}
637 
638 	/*
639 	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
640 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
641 	 * beneath us (pagelock doesn't help until the page is in pagecache).
642 	 */
643 	if (!error)
644 		error = shmem_add_to_page_cache(*pagep, mapping, index,
645 						GFP_NOWAIT, radswap);
646 	if (error != -ENOMEM) {
647 		/*
648 		 * Truncation and eviction use free_swap_and_cache(), which
649  * only trylocks the page: if we raced, best clean up here.
650 		 */
651 		delete_from_swap_cache(*pagep);
652 		set_page_dirty(*pagep);
653 		if (!error) {
654 			spin_lock(&info->lock);
655 			info->swapped--;
656 			spin_unlock(&info->lock);
657 			swap_free(swap);
658 		}
659 		error = 1;	/* not an error, but entry was found */
660 	}
661 	return error;
662 }
663 
664 /*
665  * Search through swapped inodes to find and replace swap by page.
666  */
667 int shmem_unuse(swp_entry_t swap, struct page *page)
668 {
669 	struct list_head *this, *next;
670 	struct shmem_inode_info *info;
671 	int found = 0;
672 	int error = 0;
673 
674 	/*
675 	 * There's a faint possibility that swap page was replaced before
676 	 * caller locked it: caller will come back later with the right page.
677 	 */
678 	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
679 		goto out;
680 
681 	/*
682 	 * Charge page using GFP_KERNEL while we can wait, before taking
683 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
684 	 * Charged back to the user (not to caller) when swap account is used.
685 	 */
686 	error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
687 	if (error)
688 		goto out;
689 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
690 
691 	mutex_lock(&shmem_swaplist_mutex);
692 	list_for_each_safe(this, next, &shmem_swaplist) {
693 		info = list_entry(this, struct shmem_inode_info, swaplist);
694 		if (info->swapped)
695 			found = shmem_unuse_inode(info, swap, &page);
696 		else
697 			list_del_init(&info->swaplist);
698 		cond_resched();
699 		if (found)
700 			break;
701 	}
702 	mutex_unlock(&shmem_swaplist_mutex);
703 
704 	if (found < 0)
705 		error = found;
706 out:
707 	unlock_page(page);
708 	page_cache_release(page);
709 	return error;
710 }
711 
712 /*
713  * Move the page from the page cache to the swap cache.
714  */
715 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
716 {
717 	struct shmem_inode_info *info;
718 	struct address_space *mapping;
719 	struct inode *inode;
720 	swp_entry_t swap;
721 	pgoff_t index;
722 
723 	BUG_ON(!PageLocked(page));
724 	mapping = page->mapping;
725 	index = page->index;
726 	inode = mapping->host;
727 	info = SHMEM_I(inode);
728 	if (info->flags & VM_LOCKED)
729 		goto redirty;
730 	if (!total_swap_pages)
731 		goto redirty;
732 
733 	/*
734 	 * shmem_backing_dev_info's capabilities prevent regular writeback or
735 	 * sync from ever calling shmem_writepage; but a stacking filesystem
736 	 * might use ->writepage of its underlying filesystem, in which case
737 	 * tmpfs should write out to swap only in response to memory pressure,
738 	 * and not for the writeback threads or sync.
739 	 */
740 	if (!wbc->for_reclaim) {
741 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
742 		goto redirty;
743 	}
744 
745 	/*
746 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
747 	 * value into swapfile.c, the only way we can correctly account for a
748 	 * fallocated page arriving here is now to initialize it and write it.
749 	 *
750 	 * That's okay for a page already fallocated earlier, but if we have
751 	 * not yet completed the fallocation, then (a) we want to keep track
752 	 * of this page in case we have to undo it, and (b) it may not be a
753 	 * good idea to continue anyway, once we're pushing into swap.  So
754 	 * reactivate the page, and let shmem_fallocate() quit when too many.
755 	 */
756 	if (!PageUptodate(page)) {
757 		if (inode->i_private) {
758 			struct shmem_falloc *shmem_falloc;
759 			spin_lock(&inode->i_lock);
760 			shmem_falloc = inode->i_private;
761 			if (shmem_falloc &&
762 			    index >= shmem_falloc->start &&
763 			    index < shmem_falloc->next)
764 				shmem_falloc->nr_unswapped++;
765 			else
766 				shmem_falloc = NULL;
767 			spin_unlock(&inode->i_lock);
768 			if (shmem_falloc)
769 				goto redirty;
770 		}
771 		clear_highpage(page);
772 		flush_dcache_page(page);
773 		SetPageUptodate(page);
774 	}
775 
776 	swap = get_swap_page();
777 	if (!swap.val)
778 		goto redirty;
779 
780 	/*
781 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
782 	 * if it's not already there.  Do it now before the page is
783 	 * moved to swap cache, when its pagelock no longer protects
784 	 * the inode from eviction.  But don't unlock the mutex until
785 	 * we've incremented swapped, because shmem_unuse_inode() will
786 	 * prune a !swapped inode from the swaplist under this mutex.
787 	 */
788 	mutex_lock(&shmem_swaplist_mutex);
789 	if (list_empty(&info->swaplist))
790 		list_add_tail(&info->swaplist, &shmem_swaplist);
791 
792 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
793 		swap_shmem_alloc(swap);
794 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
795 
796 		spin_lock(&info->lock);
797 		info->swapped++;
798 		shmem_recalc_inode(inode);
799 		spin_unlock(&info->lock);
800 
801 		mutex_unlock(&shmem_swaplist_mutex);
802 		BUG_ON(page_mapped(page));
803 		swap_writepage(page, wbc);
804 		return 0;
805 	}
806 
807 	mutex_unlock(&shmem_swaplist_mutex);
808 	swapcache_free(swap, NULL);
809 redirty:
810 	set_page_dirty(page);
811 	if (wbc->for_reclaim)
812 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
813 	unlock_page(page);
814 	return 0;
815 }
816 
817 #ifdef CONFIG_NUMA
818 #ifdef CONFIG_TMPFS
819 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
820 {
821 	char buffer[64];
822 
823 	if (!mpol || mpol->mode == MPOL_DEFAULT)
824 		return;		/* show nothing */
825 
826 	mpol_to_str(buffer, sizeof(buffer), mpol);
827 
828 	seq_printf(seq, ",mpol=%s", buffer);
829 }
830 
831 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
832 {
833 	struct mempolicy *mpol = NULL;
834 	if (sbinfo->mpol) {
835 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
836 		mpol = sbinfo->mpol;
837 		mpol_get(mpol);
838 		spin_unlock(&sbinfo->stat_lock);
839 	}
840 	return mpol;
841 }
842 #endif /* CONFIG_TMPFS */
843 
844 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
845 			struct shmem_inode_info *info, pgoff_t index)
846 {
847 	struct vm_area_struct pvma;
848 	struct page *page;
849 
850 	/* Create a pseudo vma that just contains the policy */
851 	pvma.vm_start = 0;
852 	/* Bias interleave by inode number to distribute better across nodes */
853 	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
854 	pvma.vm_ops = NULL;
855 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
856 
857 	page = swapin_readahead(swap, gfp, &pvma, 0);
858 
859 	/* Drop reference taken by mpol_shared_policy_lookup() */
860 	mpol_cond_put(pvma.vm_policy);
861 
862 	return page;
863 }
864 
865 static struct page *shmem_alloc_page(gfp_t gfp,
866 			struct shmem_inode_info *info, pgoff_t index)
867 {
868 	struct vm_area_struct pvma;
869 	struct page *page;
870 
871 	/* Create a pseudo vma that just contains the policy */
872 	pvma.vm_start = 0;
873 	/* Bias interleave by inode number to distribute better across nodes */
874 	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
875 	pvma.vm_ops = NULL;
876 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
877 
878 	page = alloc_page_vma(gfp, &pvma, 0);
879 
880 	/* Drop reference taken by mpol_shared_policy_lookup() */
881 	mpol_cond_put(pvma.vm_policy);
882 
883 	return page;
884 }
885 #else /* !CONFIG_NUMA */
886 #ifdef CONFIG_TMPFS
887 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
888 {
889 }
890 #endif /* CONFIG_TMPFS */
891 
892 static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
893 			struct shmem_inode_info *info, pgoff_t index)
894 {
895 	return swapin_readahead(swap, gfp, NULL, 0);
896 }
897 
898 static inline struct page *shmem_alloc_page(gfp_t gfp,
899 			struct shmem_inode_info *info, pgoff_t index)
900 {
901 	return alloc_page(gfp);
902 }
903 #endif /* CONFIG_NUMA */
904 
905 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
906 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
907 {
908 	return NULL;
909 }
910 #endif
911 
912 /*
913  * When a page is moved from swapcache to shmem filecache (either by the
914  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
915  * shmem_unuse_inode()), it may have been read in earlier from swap, in
916  * ignorance of the mapping it belongs to.  If that mapping has special
917  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
918  * we may need to copy to a suitable page before moving to filecache.
919  *
920  * In a future release, this may well be extended to respect cpuset and
921  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
922  * but for now it is a simple matter of zone.
923  */
924 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
925 {
926 	return page_zonenum(page) > gfp_zone(gfp);
927 }
928 
929 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
930 				struct shmem_inode_info *info, pgoff_t index)
931 {
932 	struct page *oldpage, *newpage;
933 	struct address_space *swap_mapping;
934 	pgoff_t swap_index;
935 	int error;
936 
937 	oldpage = *pagep;
938 	swap_index = page_private(oldpage);
939 	swap_mapping = page_mapping(oldpage);
940 
941 	/*
942 	 * We have arrived here because our zones are constrained, so don't
943 	 * limit chance of success by further cpuset and node constraints.
944 	 */
945 	gfp &= ~GFP_CONSTRAINT_MASK;
946 	newpage = shmem_alloc_page(gfp, info, index);
947 	if (!newpage)
948 		return -ENOMEM;
949 
950 	page_cache_get(newpage);
951 	copy_highpage(newpage, oldpage);
952 	flush_dcache_page(newpage);
953 
954 	__set_page_locked(newpage);
955 	SetPageUptodate(newpage);
956 	SetPageSwapBacked(newpage);
957 	set_page_private(newpage, swap_index);
958 	SetPageSwapCache(newpage);
959 
960 	/*
961 	 * Our caller will very soon move newpage out of swapcache, but it's
962 	 * a nice clean interface for us to replace oldpage by newpage there.
963 	 */
964 	spin_lock_irq(&swap_mapping->tree_lock);
965 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
966 								   newpage);
967 	if (!error) {
968 		__inc_zone_page_state(newpage, NR_FILE_PAGES);
969 		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
970 	}
971 	spin_unlock_irq(&swap_mapping->tree_lock);
972 
973 	if (unlikely(error)) {
974 		/*
975 		 * Is this possible?  I think not, now that our callers check
976 		 * both PageSwapCache and page_private after getting page lock;
977 		 * but be defensive.  Reverse old to newpage for clear and free.
978 		 */
979 		oldpage = newpage;
980 	} else {
981 		mem_cgroup_replace_page_cache(oldpage, newpage);
982 		lru_cache_add_anon(newpage);
983 		*pagep = newpage;
984 	}
985 
986 	ClearPageSwapCache(oldpage);
987 	set_page_private(oldpage, 0);
988 
989 	unlock_page(oldpage);
990 	page_cache_release(oldpage);
991 	page_cache_release(oldpage);
992 	return error;
993 }
994 
995 /*
996  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
997  *
998  * If we allocate a new one we do not mark it dirty. That's up to the
999  * vm. If we swap it in we mark it dirty, and we also free the swap
1000  * entry, since a page cannot live in both the swap and page cache.
1001  */
1002 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1003 	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
1004 {
1005 	struct address_space *mapping = inode->i_mapping;
1006 	struct shmem_inode_info *info;
1007 	struct shmem_sb_info *sbinfo;
1008 	struct page *page;
1009 	swp_entry_t swap;
1010 	int error;
1011 	int once = 0;
1012 	int alloced = 0;
1013 
1014 	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
1015 		return -EFBIG;
1016 repeat:
1017 	swap.val = 0;
1018 	page = find_lock_entry(mapping, index);
1019 	if (radix_tree_exceptional_entry(page)) {
1020 		swap = radix_to_swp_entry(page);
1021 		page = NULL;
1022 	}
1023 
1024 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1025 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1026 		error = -EINVAL;
1027 		goto failed;
1028 	}
1029 
1030 	/* fallocated page? */
1031 	if (page && !PageUptodate(page)) {
1032 		if (sgp != SGP_READ)
1033 			goto clear;
1034 		unlock_page(page);
1035 		page_cache_release(page);
1036 		page = NULL;
1037 	}
1038 	if (page || (sgp == SGP_READ && !swap.val)) {
1039 		*pagep = page;
1040 		return 0;
1041 	}
1042 
1043 	/*
1044 	 * Fast cache lookup did not find it:
1045 	 * bring it back from swap or allocate.
1046 	 */
1047 	info = SHMEM_I(inode);
1048 	sbinfo = SHMEM_SB(inode->i_sb);
1049 
1050 	if (swap.val) {
1051 		/* Look it up and read it in.. */
1052 		page = lookup_swap_cache(swap);
1053 		if (!page) {
1054 			/* here we actually do the io */
1055 			if (fault_type)
1056 				*fault_type |= VM_FAULT_MAJOR;
1057 			page = shmem_swapin(swap, gfp, info, index);
1058 			if (!page) {
1059 				error = -ENOMEM;
1060 				goto failed;
1061 			}
1062 		}
1063 
1064 		/* We have to do this with page locked to prevent races */
1065 		lock_page(page);
1066 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
1067 		    !shmem_confirm_swap(mapping, index, swap)) {
1068 			error = -EEXIST;	/* try again */
1069 			goto unlock;
1070 		}
1071 		if (!PageUptodate(page)) {
1072 			error = -EIO;
1073 			goto failed;
1074 		}
1075 		wait_on_page_writeback(page);
1076 
1077 		if (shmem_should_replace_page(page, gfp)) {
1078 			error = shmem_replace_page(&page, gfp, info, index);
1079 			if (error)
1080 				goto failed;
1081 		}
1082 
1083 		error = mem_cgroup_charge_file(page, current->mm,
1084 						gfp & GFP_RECLAIM_MASK);
1085 		if (!error) {
1086 			error = shmem_add_to_page_cache(page, mapping, index,
1087 						gfp, swp_to_radix_entry(swap));
1088 			/*
1089 			 * We already confirmed swap under page lock, and make
1090 			 * no memory allocation here, so usually no possibility
1091 			 * of error; but free_swap_and_cache() only trylocks a
1092 			 * page, so it is just possible that the entry has been
1093 			 * truncated or holepunched since swap was confirmed.
1094 			 * shmem_undo_range() will have done some of the
1095 			 * unaccounting, now delete_from_swap_cache() will do
1096 			 * the rest (including mem_cgroup_uncharge_swapcache).
1097 			 * Reset swap.val? No, leave it so "failed" goes back to
1098 			 * "repeat": reading a hole and writing should succeed.
1099 			 */
1100 			if (error)
1101 				delete_from_swap_cache(page);
1102 		}
1103 		if (error)
1104 			goto failed;
1105 
1106 		spin_lock(&info->lock);
1107 		info->swapped--;
1108 		shmem_recalc_inode(inode);
1109 		spin_unlock(&info->lock);
1110 
1111 		delete_from_swap_cache(page);
1112 		set_page_dirty(page);
1113 		swap_free(swap);
1114 
1115 	} else {
1116 		if (shmem_acct_block(info->flags)) {
1117 			error = -ENOSPC;
1118 			goto failed;
1119 		}
1120 		if (sbinfo->max_blocks) {
1121 			if (percpu_counter_compare(&sbinfo->used_blocks,
1122 						sbinfo->max_blocks) >= 0) {
1123 				error = -ENOSPC;
1124 				goto unacct;
1125 			}
1126 			percpu_counter_inc(&sbinfo->used_blocks);
1127 		}
1128 
1129 		page = shmem_alloc_page(gfp, info, index);
1130 		if (!page) {
1131 			error = -ENOMEM;
1132 			goto decused;
1133 		}
1134 
1135 		SetPageSwapBacked(page);
1136 		__set_page_locked(page);
1137 		error = mem_cgroup_charge_file(page, current->mm,
1138 						gfp & GFP_RECLAIM_MASK);
1139 		if (error)
1140 			goto decused;
1141 		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
1142 		if (!error) {
1143 			error = shmem_add_to_page_cache(page, mapping, index,
1144 							gfp, NULL);
1145 			radix_tree_preload_end();
1146 		}
1147 		if (error) {
1148 			mem_cgroup_uncharge_cache_page(page);
1149 			goto decused;
1150 		}
1151 		lru_cache_add_anon(page);
1152 
1153 		spin_lock(&info->lock);
1154 		info->alloced++;
1155 		inode->i_blocks += BLOCKS_PER_PAGE;
1156 		shmem_recalc_inode(inode);
1157 		spin_unlock(&info->lock);
1158 		alloced = true;
1159 
1160 		/*
1161 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1162 		 */
1163 		if (sgp == SGP_FALLOC)
1164 			sgp = SGP_WRITE;
1165 clear:
1166 		/*
1167 		 * Let SGP_WRITE caller clear ends if write does not fill page;
1168 		 * but SGP_FALLOC on a page fallocated earlier must initialize
1169 		 * it now, lest undo on failure cancel our earlier guarantee.
1170 		 */
1171 		if (sgp != SGP_WRITE) {
1172 			clear_highpage(page);
1173 			flush_dcache_page(page);
1174 			SetPageUptodate(page);
1175 		}
1176 		if (sgp == SGP_DIRTY)
1177 			set_page_dirty(page);
1178 	}
1179 
1180 	/* Perhaps the file has been truncated since we checked */
1181 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1182 	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1183 		error = -EINVAL;
1184 		if (alloced)
1185 			goto trunc;
1186 		else
1187 			goto failed;
1188 	}
1189 	*pagep = page;
1190 	return 0;
1191 
1192 	/*
1193 	 * Error recovery.
1194 	 */
1195 trunc:
1196 	info = SHMEM_I(inode);
1197 	ClearPageDirty(page);
1198 	delete_from_page_cache(page);
1199 	spin_lock(&info->lock);
1200 	info->alloced--;
1201 	inode->i_blocks -= BLOCKS_PER_PAGE;
1202 	spin_unlock(&info->lock);
1203 decused:
1204 	sbinfo = SHMEM_SB(inode->i_sb);
1205 	if (sbinfo->max_blocks)
1206 		percpu_counter_add(&sbinfo->used_blocks, -1);
1207 unacct:
1208 	shmem_unacct_blocks(info->flags, 1);
1209 failed:
1210 	if (swap.val && error != -EINVAL &&
1211 	    !shmem_confirm_swap(mapping, index, swap))
1212 		error = -EEXIST;
1213 unlock:
1214 	if (page) {
1215 		unlock_page(page);
1216 		page_cache_release(page);
1217 	}
1218 	if (error == -ENOSPC && !once++) {
1219 		info = SHMEM_I(inode);
1220 		spin_lock(&info->lock);
1221 		shmem_recalc_inode(inode);
1222 		spin_unlock(&info->lock);
1223 		goto repeat;
1224 	}
1225 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
1226 		goto repeat;
1227 	return error;
1228 }
1229 
1230 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1231 {
1232 	struct inode *inode = file_inode(vma->vm_file);
1233 	int error;
1234 	int ret = VM_FAULT_LOCKED;
1235 
1236 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1237 	if (error)
1238 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1239 
1240 	if (ret & VM_FAULT_MAJOR) {
1241 		count_vm_event(PGMAJFAULT);
1242 		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1243 	}
1244 	return ret;
1245 }
1246 
1247 #ifdef CONFIG_NUMA
1248 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1249 {
1250 	struct inode *inode = file_inode(vma->vm_file);
1251 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1252 }
1253 
1254 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1255 					  unsigned long addr)
1256 {
1257 	struct inode *inode = file_inode(vma->vm_file);
1258 	pgoff_t index;
1259 
1260 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1261 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1262 }
1263 #endif
1264 
1265 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1266 {
1267 	struct inode *inode = file_inode(file);
1268 	struct shmem_inode_info *info = SHMEM_I(inode);
1269 	int retval = -ENOMEM;
1270 
1271 	spin_lock(&info->lock);
1272 	if (lock && !(info->flags & VM_LOCKED)) {
1273 		if (!user_shm_lock(inode->i_size, user))
1274 			goto out_nomem;
1275 		info->flags |= VM_LOCKED;
1276 		mapping_set_unevictable(file->f_mapping);
1277 	}
1278 	if (!lock && (info->flags & VM_LOCKED) && user) {
1279 		user_shm_unlock(inode->i_size, user);
1280 		info->flags &= ~VM_LOCKED;
1281 		mapping_clear_unevictable(file->f_mapping);
1282 	}
1283 	retval = 0;
1284 
1285 out_nomem:
1286 	spin_unlock(&info->lock);
1287 	return retval;
1288 }
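
/*
 * A hedged userspace sketch of the SysV path that reaches shmem_lock()
 * (a standalone program, not part of this file; error checks trimmed).
 * SHM_LOCK marks the segment's pages unevictable; SHM_UNLOCK undoes it
 * via shmem_unlock_mapping() above.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *
 *		shmctl(id, SHM_LOCK, NULL);
 *		shmctl(id, SHM_UNLOCK, NULL);
 *		shmctl(id, IPC_RMID, NULL);
 *		return 0;
 *	}
 */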
1289 
1290 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1291 {
1292 	file_accessed(file);
1293 	vma->vm_ops = &shmem_vm_ops;
1294 	return 0;
1295 }
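
/*
 * A hedged userspace sketch of how a tmpfs mapping exercises the code
 * above (a standalone program, not part of this file): mmap() installs
 * shmem_vm_ops, and the first write to p faults a page in through
 * shmem_fault().  The /dev/shm path assumes tmpfs is mounted there.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *		char *p;
 *
 *		ftruncate(fd, 4096);
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *		strcpy(p, "hello");
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */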
1296 
1297 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1298 				     umode_t mode, dev_t dev, unsigned long flags)
1299 {
1300 	struct inode *inode;
1301 	struct shmem_inode_info *info;
1302 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1303 
1304 	if (shmem_reserve_inode(sb))
1305 		return NULL;
1306 
1307 	inode = new_inode(sb);
1308 	if (inode) {
1309 		inode->i_ino = get_next_ino();
1310 		inode_init_owner(inode, dir, mode);
1311 		inode->i_blocks = 0;
1312 		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1313 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1314 		inode->i_generation = get_seconds();
1315 		info = SHMEM_I(inode);
1316 		memset(info, 0, (char *)inode - (char *)info);
1317 		spin_lock_init(&info->lock);
1318 		info->flags = flags & VM_NORESERVE;
1319 		INIT_LIST_HEAD(&info->swaplist);
1320 		simple_xattrs_init(&info->xattrs);
1321 		cache_no_acl(inode);
1322 
1323 		switch (mode & S_IFMT) {
1324 		default:
1325 			inode->i_op = &shmem_special_inode_operations;
1326 			init_special_inode(inode, mode, dev);
1327 			break;
1328 		case S_IFREG:
1329 			inode->i_mapping->a_ops = &shmem_aops;
1330 			inode->i_op = &shmem_inode_operations;
1331 			inode->i_fop = &shmem_file_operations;
1332 			mpol_shared_policy_init(&info->policy,
1333 						 shmem_get_sbmpol(sbinfo));
1334 			break;
1335 		case S_IFDIR:
1336 			inc_nlink(inode);
1337 			/* Some things misbehave if size == 0 on a directory */
1338 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
1339 			inode->i_op = &shmem_dir_inode_operations;
1340 			inode->i_fop = &simple_dir_operations;
1341 			break;
1342 		case S_IFLNK:
1343 			/*
1344 			 * Must not load anything in the rbtree,
1345 			 * mpol_free_shared_policy will not be called.
1346 			 */
1347 			mpol_shared_policy_init(&info->policy, NULL);
1348 			break;
1349 		}
1350 	} else
1351 		shmem_free_inode(sb);
1352 	return inode;
1353 }
1354 
1355 bool shmem_mapping(struct address_space *mapping)
1356 {
1357 	return mapping->backing_dev_info == &shmem_backing_dev_info;
1358 }
1359 
1360 #ifdef CONFIG_TMPFS
1361 static const struct inode_operations shmem_symlink_inode_operations;
1362 static const struct inode_operations shmem_short_symlink_operations;
1363 
1364 #ifdef CONFIG_TMPFS_XATTR
1365 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1366 #else
1367 #define shmem_initxattrs NULL
1368 #endif
1369 
1370 static int
1371 shmem_write_begin(struct file *file, struct address_space *mapping,
1372 			loff_t pos, unsigned len, unsigned flags,
1373 			struct page **pagep, void **fsdata)
1374 {
1375 	struct inode *inode = mapping->host;
1376 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1377 	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1378 }
1379 
1380 static int
1381 shmem_write_end(struct file *file, struct address_space *mapping,
1382 			loff_t pos, unsigned len, unsigned copied,
1383 			struct page *page, void *fsdata)
1384 {
1385 	struct inode *inode = mapping->host;
1386 
1387 	if (pos + copied > inode->i_size)
1388 		i_size_write(inode, pos + copied);
1389 
1390 	if (!PageUptodate(page)) {
1391 		if (copied < PAGE_CACHE_SIZE) {
1392 			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1393 			zero_user_segments(page, 0, from,
1394 					from + copied, PAGE_CACHE_SIZE);
1395 		}
1396 		SetPageUptodate(page);
1397 	}
1398 	set_page_dirty(page);
1399 	unlock_page(page);
1400 	page_cache_release(page);
1401 
1402 	return copied;
1403 }
1404 
1405 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1406 		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1407 {
1408 	struct file *file = iocb->ki_filp;
1409 	struct inode *inode = file_inode(file);
1410 	struct address_space *mapping = inode->i_mapping;
1411 	pgoff_t index;
1412 	unsigned long offset;
1413 	enum sgp_type sgp = SGP_READ;
1414 	int error = 0;
1415 	ssize_t retval;
1416 	size_t count;
1417 	loff_t *ppos = &iocb->ki_pos;
1418 	struct iov_iter iter;
1419 
1420 	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1421 	if (retval)
1422 		return retval;
1423 	iov_iter_init(&iter, iov, nr_segs, count, 0);
1424 
1425 	/*
1426 	 * Might this read be for a stacking filesystem?  Then when reading
1427 	 * holes of a sparse file, we actually need to allocate those pages,
1428 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1429 	 */
1430 	if (segment_eq(get_fs(), KERNEL_DS))
1431 		sgp = SGP_DIRTY;
1432 
1433 	index = *ppos >> PAGE_CACHE_SHIFT;
1434 	offset = *ppos & ~PAGE_CACHE_MASK;
1435 
1436 	for (;;) {
1437 		struct page *page = NULL;
1438 		pgoff_t end_index;
1439 		unsigned long nr, ret;
1440 		loff_t i_size = i_size_read(inode);
1441 
1442 		end_index = i_size >> PAGE_CACHE_SHIFT;
1443 		if (index > end_index)
1444 			break;
1445 		if (index == end_index) {
1446 			nr = i_size & ~PAGE_CACHE_MASK;
1447 			if (nr <= offset)
1448 				break;
1449 		}
1450 
1451 		error = shmem_getpage(inode, index, &page, sgp, NULL);
1452 		if (error) {
1453 			if (error == -EINVAL)
1454 				error = 0;
1455 			break;
1456 		}
1457 		if (page)
1458 			unlock_page(page);
1459 
1460 		/*
1461 		 * We must evaluate after, since reads (unlike writes)
1462 		 * are called without i_mutex protection against truncate
1463 		 */
1464 		nr = PAGE_CACHE_SIZE;
1465 		i_size = i_size_read(inode);
1466 		end_index = i_size >> PAGE_CACHE_SHIFT;
1467 		if (index == end_index) {
1468 			nr = i_size & ~PAGE_CACHE_MASK;
1469 			if (nr <= offset) {
1470 				if (page)
1471 					page_cache_release(page);
1472 				break;
1473 			}
1474 		}
1475 		nr -= offset;
1476 
1477 		if (page) {
1478 			/*
1479 			 * If users can be writing to this page using arbitrary
1480 			 * virtual addresses, take care about potential aliasing
1481 			 * before reading the page on the kernel side.
1482 			 */
1483 			if (mapping_writably_mapped(mapping))
1484 				flush_dcache_page(page);
1485 			/*
1486 			 * Mark the page accessed if we read the beginning.
1487 			 */
1488 			if (!offset)
1489 				mark_page_accessed(page);
1490 		} else {
1491 			page = ZERO_PAGE(0);
1492 			page_cache_get(page);
1493 		}
1494 
1495 		/*
1496 		 * Ok, we have the page, and it's up-to-date, so
1497 		 * now we can copy it to user space...
1498 		 */
1499 		ret = copy_page_to_iter(page, offset, nr, &iter);
1500 		retval += ret;
1501 		offset += ret;
1502 		index += offset >> PAGE_CACHE_SHIFT;
1503 		offset &= ~PAGE_CACHE_MASK;
1504 
1505 		page_cache_release(page);
1506 		if (!iov_iter_count(&iter))
1507 			break;
1508 		if (ret < nr) {
1509 			error = -EFAULT;
1510 			break;
1511 		}
1512 		cond_resched();
1513 	}
1514 
1515 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1516 	file_accessed(file);
1517 	return retval ? retval : error;
1518 }
1519 
1520 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1521 				struct pipe_inode_info *pipe, size_t len,
1522 				unsigned int flags)
1523 {
1524 	struct address_space *mapping = in->f_mapping;
1525 	struct inode *inode = mapping->host;
1526 	unsigned int loff, nr_pages, req_pages;
1527 	struct page *pages[PIPE_DEF_BUFFERS];
1528 	struct partial_page partial[PIPE_DEF_BUFFERS];
1529 	struct page *page;
1530 	pgoff_t index, end_index;
1531 	loff_t isize, left;
1532 	int error, page_nr;
1533 	struct splice_pipe_desc spd = {
1534 		.pages = pages,
1535 		.partial = partial,
1536 		.nr_pages_max = PIPE_DEF_BUFFERS,
1537 		.flags = flags,
1538 		.ops = &page_cache_pipe_buf_ops,
1539 		.spd_release = spd_release_page,
1540 	};
1541 
1542 	isize = i_size_read(inode);
1543 	if (unlikely(*ppos >= isize))
1544 		return 0;
1545 
1546 	left = isize - *ppos;
1547 	if (unlikely(left < len))
1548 		len = left;
1549 
1550 	if (splice_grow_spd(pipe, &spd))
1551 		return -ENOMEM;
1552 
1553 	index = *ppos >> PAGE_CACHE_SHIFT;
1554 	loff = *ppos & ~PAGE_CACHE_MASK;
1555 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1556 	nr_pages = min(req_pages, spd.nr_pages_max);
1557 
1558 	spd.nr_pages = find_get_pages_contig(mapping, index,
1559 						nr_pages, spd.pages);
1560 	index += spd.nr_pages;
1561 	error = 0;
1562 
1563 	while (spd.nr_pages < nr_pages) {
1564 		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1565 		if (error)
1566 			break;
1567 		unlock_page(page);
1568 		spd.pages[spd.nr_pages++] = page;
1569 		index++;
1570 	}
1571 
1572 	index = *ppos >> PAGE_CACHE_SHIFT;
1573 	nr_pages = spd.nr_pages;
1574 	spd.nr_pages = 0;
1575 
1576 	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1577 		unsigned int this_len;
1578 
1579 		if (!len)
1580 			break;
1581 
1582 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1583 		page = spd.pages[page_nr];
1584 
1585 		if (!PageUptodate(page) || page->mapping != mapping) {
1586 			error = shmem_getpage(inode, index, &page,
1587 							SGP_CACHE, NULL);
1588 			if (error)
1589 				break;
1590 			unlock_page(page);
1591 			page_cache_release(spd.pages[page_nr]);
1592 			spd.pages[page_nr] = page;
1593 		}
1594 
1595 		isize = i_size_read(inode);
1596 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1597 		if (unlikely(!isize || index > end_index))
1598 			break;
1599 
1600 		if (end_index == index) {
1601 			unsigned int plen;
1602 
1603 			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1604 			if (plen <= loff)
1605 				break;
1606 
1607 			this_len = min(this_len, plen - loff);
1608 			len = this_len;
1609 		}
1610 
1611 		spd.partial[page_nr].offset = loff;
1612 		spd.partial[page_nr].len = this_len;
1613 		len -= this_len;
1614 		loff = 0;
1615 		spd.nr_pages++;
1616 		index++;
1617 	}
1618 
1619 	while (page_nr < nr_pages)
1620 		page_cache_release(spd.pages[page_nr++]);
1621 
1622 	if (spd.nr_pages)
1623 		error = splice_to_pipe(pipe, &spd);
1624 
1625 	splice_shrink_spd(&spd);
1626 
1627 	if (error > 0) {
1628 		*ppos += error;
1629 		file_accessed(in);
1630 	}
1631 	return error;
1632 }
1633 
1634 /*
1635  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1636  */
1637 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1638 				    pgoff_t index, pgoff_t end, int whence)
1639 {
1640 	struct page *page;
1641 	struct pagevec pvec;
1642 	pgoff_t indices[PAGEVEC_SIZE];
1643 	bool done = false;
1644 	int i;
1645 
1646 	pagevec_init(&pvec, 0);
1647 	pvec.nr = 1;		/* start small: we may be there already */
1648 	while (!done) {
1649 		pvec.nr = find_get_entries(mapping, index,
1650 					pvec.nr, pvec.pages, indices);
1651 		if (!pvec.nr) {
1652 			if (whence == SEEK_DATA)
1653 				index = end;
1654 			break;
1655 		}
1656 		for (i = 0; i < pvec.nr; i++, index++) {
1657 			if (index < indices[i]) {
1658 				if (whence == SEEK_HOLE) {
1659 					done = true;
1660 					break;
1661 				}
1662 				index = indices[i];
1663 			}
1664 			page = pvec.pages[i];
1665 			if (page && !radix_tree_exceptional_entry(page)) {
1666 				if (!PageUptodate(page))
1667 					page = NULL;
1668 			}
1669 			if (index >= end ||
1670 			    (page && whence == SEEK_DATA) ||
1671 			    (!page && whence == SEEK_HOLE)) {
1672 				done = true;
1673 				break;
1674 			}
1675 		}
1676 		pagevec_remove_exceptionals(&pvec);
1677 		pagevec_release(&pvec);
1678 		pvec.nr = PAGEVEC_SIZE;
1679 		cond_resched();
1680 	}
1681 	return index;
1682 }
1683 
1684 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1685 {
1686 	struct address_space *mapping = file->f_mapping;
1687 	struct inode *inode = mapping->host;
1688 	pgoff_t start, end;
1689 	loff_t new_offset;
1690 
1691 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
1692 		return generic_file_llseek_size(file, offset, whence,
1693 					MAX_LFS_FILESIZE, i_size_read(inode));
1694 	mutex_lock(&inode->i_mutex);
1695 	/* We're holding i_mutex so we can access i_size directly */
1696 
1697 	if (offset < 0)
1698 		offset = -EINVAL;
1699 	else if (offset >= inode->i_size)
1700 		offset = -ENXIO;
1701 	else {
1702 		start = offset >> PAGE_CACHE_SHIFT;
1703 		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1704 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1705 		new_offset <<= PAGE_CACHE_SHIFT;
1706 		if (new_offset > offset) {
1707 			if (new_offset < inode->i_size)
1708 				offset = new_offset;
1709 			else if (whence == SEEK_DATA)
1710 				offset = -ENXIO;
1711 			else
1712 				offset = inode->i_size;
1713 		}
1714 	}
1715 
1716 	if (offset >= 0)
1717 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1718 	mutex_unlock(&inode->i_mutex);
1719 	return offset;
1720 }
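
/*
 * A hedged userspace sketch of the SEEK_HOLE/SEEK_DATA support above
 * (a standalone program, not part of this file; assumes a sparse tmpfs
 * file such as the one created by the fallocate sketch further down):
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/sparse", O_RDONLY);
 *		off_t data = lseek(fd, 0, SEEK_DATA);
 *		off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 *		printf("data at %lld, hole at %lld\n",
 *		       (long long)data, (long long)hole);
 *		close(fd);
 *		return 0;
 *	}
 */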
1721 
1722 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1723 							 loff_t len)
1724 {
1725 	struct inode *inode = file_inode(file);
1726 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1727 	struct shmem_falloc shmem_falloc;
1728 	pgoff_t start, index, end;
1729 	int error;
1730 
1731 	mutex_lock(&inode->i_mutex);
1732 
1733 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1734 		struct address_space *mapping = file->f_mapping;
1735 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
1736 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1737 
1738 		if ((u64)unmap_end > (u64)unmap_start)
1739 			unmap_mapping_range(mapping, unmap_start,
1740 					    1 + unmap_end - unmap_start, 0);
1741 		shmem_truncate_range(inode, offset, offset + len - 1);
1742 		/* No need to unmap again: hole-punching leaves COWed pages */
1743 		error = 0;
1744 		goto out;
1745 	}
1746 
1747 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
1748 	error = inode_newsize_ok(inode, offset + len);
1749 	if (error)
1750 		goto out;
1751 
1752 	start = offset >> PAGE_CACHE_SHIFT;
1753 	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1754 	/* Try to avoid a swapstorm if len is impossible to satisfy */
1755 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
1756 		error = -ENOSPC;
1757 		goto out;
1758 	}
1759 
1760 	shmem_falloc.start = start;
1761 	shmem_falloc.next  = start;
1762 	shmem_falloc.nr_falloced = 0;
1763 	shmem_falloc.nr_unswapped = 0;
1764 	spin_lock(&inode->i_lock);
1765 	inode->i_private = &shmem_falloc;
1766 	spin_unlock(&inode->i_lock);
1767 
1768 	for (index = start; index < end; index++) {
1769 		struct page *page;
1770 
1771 		/*
1772 		 * Good, the fallocate(2) manpage permits EINTR: we may have
1773 		 * been interrupted because we are using up too much memory.
1774 		 */
1775 		if (signal_pending(current))
1776 			error = -EINTR;
1777 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
1778 			error = -ENOMEM;
1779 		else
1780 			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
1781 									NULL);
1782 		if (error) {
1783 			/* Remove the !PageUptodate pages we added */
1784 			shmem_undo_range(inode,
1785 				(loff_t)start << PAGE_CACHE_SHIFT,
1786 				(loff_t)index << PAGE_CACHE_SHIFT, true);
1787 			goto undone;
1788 		}
1789 
1790 		/*
1791 		 * Inform shmem_writepage() how far we have reached.
1792 		 * No need for lock or barrier: we have the page lock.
1793 		 */
1794 		shmem_falloc.next++;
1795 		if (!PageUptodate(page))
1796 			shmem_falloc.nr_falloced++;
1797 
1798 		/*
1799 		 * If !PageUptodate, leave it that way so that freeable pages
1800 		 * can be recognized if we need to rollback on error later.
1801 		 * But set_page_dirty so that memory pressure will swap rather
1802 		 * than free the pages we are allocating (and SGP_CACHE pages
1803 		 * might still be clean: we now need to mark those dirty too).
1804 		 */
1805 		set_page_dirty(page);
1806 		unlock_page(page);
1807 		page_cache_release(page);
1808 		cond_resched();
1809 	}
1810 
1811 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
1812 		i_size_write(inode, offset + len);
1813 	inode->i_ctime = CURRENT_TIME;
1814 undone:
1815 	spin_lock(&inode->i_lock);
1816 	inode->i_private = NULL;
1817 	spin_unlock(&inode->i_lock);
1818 out:
1819 	mutex_unlock(&inode->i_mutex);
1820 	return error;
1821 }
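
/*
 * Editorial sketch (not part of shmem.c): a minimal userspace view of the
 * two shmem_fallocate() paths above, on a hypothetical tmpfs file.  A
 * mode of 0 exercises the SGP_FALLOC preallocation loop, while
 * FALLOC_FL_PUNCH_HOLE (which the VFS requires to be paired with
 * FALLOC_FL_KEEP_SIZE) takes the shmem_truncate_range() branch.
 */
#if 0	/* userspace illustration, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>		/* fallocate(), FALLOC_FL_* */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	if (fallocate(fd, 0, 0, 1 << 20))	/* preallocate 1MiB */
		perror("fallocate");
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 8192))		/* punch two pages */
		perror("punch hole");
	close(fd);
	return 0;
}
#endif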
1822 
1823 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1824 {
1825 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1826 
1827 	buf->f_type = TMPFS_MAGIC;
1828 	buf->f_bsize = PAGE_CACHE_SIZE;
1829 	buf->f_namelen = NAME_MAX;
1830 	if (sbinfo->max_blocks) {
1831 		buf->f_blocks = sbinfo->max_blocks;
1832 		buf->f_bavail =
1833 		buf->f_bfree  = sbinfo->max_blocks -
1834 				percpu_counter_sum(&sbinfo->used_blocks);
1835 	}
1836 	if (sbinfo->max_inodes) {
1837 		buf->f_files = sbinfo->max_inodes;
1838 		buf->f_ffree = sbinfo->free_inodes;
1839 	}
1840 	/* else leave those fields 0 like simple_statfs */
1841 	return 0;
1842 }
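
/*
 * Editorial sketch (not part of shmem.c): how the fields filled in by
 * shmem_statfs() appear from userspace, for a hypothetical mount point.
 * f_bsize is the page size; f_blocks/f_bfree stay zero when the
 * instance is unlimited (max_blocks == 0).
 */
#if 0	/* userspace illustration, not kernel code */
#include <stdio.h>
#include <sys/vfs.h>		/* statfs(2) */

int main(void)
{
	struct statfs st;

	if (statfs("/dev/shm", &st))
		return 1;
	printf("bsize %ld: %ld of %ld blocks free\n",
	       (long)st.f_bsize, (long)st.f_bfree, (long)st.f_blocks);
	return 0;
}
#endif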
1843 
1844 /*
1845  * File creation. Allocate an inode, and we're done.
1846  */
1847 static int
1848 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1849 {
1850 	struct inode *inode;
1851 	int error = -ENOSPC;
1852 
1853 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1854 	if (inode) {
1855 		error = simple_acl_create(dir, inode);
1856 		if (error)
1857 			goto out_iput;
1858 		error = security_inode_init_security(inode, dir,
1859 						     &dentry->d_name,
1860 						     shmem_initxattrs, NULL);
1861 		if (error && error != -EOPNOTSUPP)
1862 			goto out_iput;
1863 
1864 		error = 0;
1865 		dir->i_size += BOGO_DIRENT_SIZE;
1866 		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1867 		d_instantiate(dentry, inode);
1868 		dget(dentry); /* Extra count - pin the dentry in core */
1869 	}
1870 	return error;
1871 out_iput:
1872 	iput(inode);
1873 	return error;
1874 }
1875 
1876 static int
1877 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
1878 {
1879 	struct inode *inode;
1880 	int error = -ENOSPC;
1881 
1882 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
1883 	if (inode) {
1884 		error = security_inode_init_security(inode, dir,
1885 						     NULL,
1886 						     shmem_initxattrs, NULL);
1887 		if (error && error != -EOPNOTSUPP)
1888 			goto out_iput;
1889 		error = simple_acl_create(dir, inode);
1890 		if (error)
1891 			goto out_iput;
1892 		d_tmpfile(dentry, inode);
1893 	}
1894 	return error;
1895 out_iput:
1896 	iput(inode);
1897 	return error;
1898 }
1899 
1900 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1901 {
1902 	int error;
1903 
1904 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1905 		return error;
1906 	inc_nlink(dir);
1907 	return 0;
1908 }
1909 
1910 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1911 		bool excl)
1912 {
1913 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1914 }
1915 
1916 /*
1917  * Link a file.
1918  */
1919 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1920 {
1921 	struct inode *inode = old_dentry->d_inode;
1922 	int ret;
1923 
1924 	/*
1925 	 * No ordinary (disk based) filesystem counts links as inodes;
1926 	 * but each new link needs a new dentry, pinning lowmem, and
1927 	 * tmpfs dentries cannot be pruned until they are unlinked.
1928 	 */
1929 	ret = shmem_reserve_inode(inode->i_sb);
1930 	if (ret)
1931 		goto out;
1932 
1933 	dir->i_size += BOGO_DIRENT_SIZE;
1934 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1935 	inc_nlink(inode);
1936 	ihold(inode);	/* New dentry reference */
1937 	dget(dentry);		/* Extra pinning count for the created dentry */
1938 	d_instantiate(dentry, inode);
1939 out:
1940 	return ret;
1941 }
1942 
1943 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1944 {
1945 	struct inode *inode = dentry->d_inode;
1946 
1947 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1948 		shmem_free_inode(inode->i_sb);
1949 
1950 	dir->i_size -= BOGO_DIRENT_SIZE;
1951 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1952 	drop_nlink(inode);
1953 	dput(dentry);	/* Undo the count from "create" - this does all the work */
1954 	return 0;
1955 }
1956 
1957 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1958 {
1959 	if (!simple_empty(dentry))
1960 		return -ENOTEMPTY;
1961 
1962 	drop_nlink(dentry->d_inode);
1963 	drop_nlink(dir);
1964 	return shmem_unlink(dir, dentry);
1965 }
1966 
1967 /*
1968  * The VFS layer already does all the dentry stuff for rename;
1969  * we just have to decrement the usage count for the target if
1970  * it exists so that the VFS layer correctly frees it when it
1971  * gets overwritten.
1972  */
1973 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1974 {
1975 	struct inode *inode = old_dentry->d_inode;
1976 	int they_are_dirs = S_ISDIR(inode->i_mode);
1977 
1978 	if (!simple_empty(new_dentry))
1979 		return -ENOTEMPTY;
1980 
1981 	if (new_dentry->d_inode) {
1982 		(void) shmem_unlink(new_dir, new_dentry);
1983 		if (they_are_dirs)
1984 			drop_nlink(old_dir);
1985 	} else if (they_are_dirs) {
1986 		drop_nlink(old_dir);
1987 		inc_nlink(new_dir);
1988 	}
1989 
1990 	old_dir->i_size -= BOGO_DIRENT_SIZE;
1991 	new_dir->i_size += BOGO_DIRENT_SIZE;
1992 	old_dir->i_ctime = old_dir->i_mtime =
1993 	new_dir->i_ctime = new_dir->i_mtime =
1994 	inode->i_ctime = CURRENT_TIME;
1995 	return 0;
1996 }
1997 
1998 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1999 {
2000 	int error;
2001 	int len;
2002 	struct inode *inode;
2003 	struct page *page;
2004 	char *kaddr;
2005 	struct shmem_inode_info *info;
2006 
2007 	len = strlen(symname) + 1;
2008 	if (len > PAGE_CACHE_SIZE)
2009 		return -ENAMETOOLONG;
2010 
2011 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2012 	if (!inode)
2013 		return -ENOSPC;
2014 
2015 	error = security_inode_init_security(inode, dir, &dentry->d_name,
2016 					     shmem_initxattrs, NULL);
2017 	if (error) {
2018 		if (error != -EOPNOTSUPP) {
2019 			iput(inode);
2020 			return error;
2021 		}
2022 		error = 0;
2023 	}
2024 
2025 	info = SHMEM_I(inode);
2026 	inode->i_size = len-1;
2027 	if (len <= SHORT_SYMLINK_LEN) {
2028 		info->symlink = kmemdup(symname, len, GFP_KERNEL);
2029 		if (!info->symlink) {
2030 			iput(inode);
2031 			return -ENOMEM;
2032 		}
2033 		inode->i_op = &shmem_short_symlink_operations;
2034 	} else {
2035 		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2036 		if (error) {
2037 			iput(inode);
2038 			return error;
2039 		}
2040 		inode->i_mapping->a_ops = &shmem_aops;
2041 		inode->i_op = &shmem_symlink_inode_operations;
2042 		kaddr = kmap_atomic(page);
2043 		memcpy(kaddr, symname, len);
2044 		kunmap_atomic(kaddr);
2045 		SetPageUptodate(page);
2046 		set_page_dirty(page);
2047 		unlock_page(page);
2048 		page_cache_release(page);
2049 	}
2050 	dir->i_size += BOGO_DIRENT_SIZE;
2051 	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2052 	d_instantiate(dentry, inode);
2053 	dget(dentry);
2054 	return 0;
2055 }
2056 
2057 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
2058 {
2059 	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
2060 	return NULL;
2061 }
2062 
2063 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2064 {
2065 	struct page *page = NULL;
2066 	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2067 	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
2068 	if (page)
2069 		unlock_page(page);
2070 	return page;
2071 }
2072 
2073 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2074 {
2075 	if (!IS_ERR(nd_get_link(nd))) {
2076 		struct page *page = cookie;
2077 		kunmap(page);
2078 		mark_page_accessed(page);
2079 		page_cache_release(page);
2080 	}
2081 }
2082 
2083 #ifdef CONFIG_TMPFS_XATTR
2084 /*
2085  * Superblocks without xattr inode operations may get some security.* xattr
2086  * support from the LSM "for free". As soon as we have any other xattrs
2087  * like ACLs, we also need to implement the security.* handlers at
2088  * filesystem level, though.
2089  */
2090 
2091 /*
2092  * Callback for security_inode_init_security() for acquiring xattrs.
2093  */
2094 static int shmem_initxattrs(struct inode *inode,
2095 			    const struct xattr *xattr_array,
2096 			    void *fs_info)
2097 {
2098 	struct shmem_inode_info *info = SHMEM_I(inode);
2099 	const struct xattr *xattr;
2100 	struct simple_xattr *new_xattr;
2101 	size_t len;
2102 
2103 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2104 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
2105 		if (!new_xattr)
2106 			return -ENOMEM;
2107 
2108 		len = strlen(xattr->name) + 1;
2109 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
2110 					  GFP_KERNEL);
2111 		if (!new_xattr->name) {
2112 			kfree(new_xattr);
2113 			return -ENOMEM;
2114 		}
2115 
2116 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
2117 		       XATTR_SECURITY_PREFIX_LEN);
2118 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
2119 		       xattr->name, len);
2120 
2121 		simple_xattr_list_add(&info->xattrs, new_xattr);
2122 	}
2123 
2124 	return 0;
2125 }
2126 
2127 static const struct xattr_handler *shmem_xattr_handlers[] = {
2128 #ifdef CONFIG_TMPFS_POSIX_ACL
2129 	&posix_acl_access_xattr_handler,
2130 	&posix_acl_default_xattr_handler,
2131 #endif
2132 	NULL
2133 };
2134 
2135 static int shmem_xattr_validate(const char *name)
2136 {
2137 	struct { const char *prefix; size_t len; } arr[] = {
2138 		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2139 		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2140 	};
2141 	int i;
2142 
2143 	for (i = 0; i < ARRAY_SIZE(arr); i++) {
2144 		size_t preflen = arr[i].len;
2145 		if (strncmp(name, arr[i].prefix, preflen) == 0) {
2146 			if (!name[preflen])
2147 				return -EINVAL;
2148 			return 0;
2149 		}
2150 	}
2151 	return -EOPNOTSUPP;
2152 }
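
/*
 * Editorial note on the validation above: "security.foo" and
 * "trusted.foo" pass, a bare prefix such as "security." is rejected
 * with -EINVAL, and any other name (e.g. "user.foo") gets -EOPNOTSUPP.
 * system.* names never reach this function: the callers below divert
 * them to the generic sb->s_xattr handlers first.
 */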
2153 
2154 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2155 			      void *buffer, size_t size)
2156 {
2157 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2158 	int err;
2159 
2160 	/*
2161 	 * If this is a request for a synthetic attribute in the system.*
2162 	 * namespace use the generic infrastructure to resolve a handler
2163 	 * for it via sb->s_xattr.
2164 	 */
2165 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2166 		return generic_getxattr(dentry, name, buffer, size);
2167 
2168 	err = shmem_xattr_validate(name);
2169 	if (err)
2170 		return err;
2171 
2172 	return simple_xattr_get(&info->xattrs, name, buffer, size);
2173 }
2174 
2175 static int shmem_setxattr(struct dentry *dentry, const char *name,
2176 			  const void *value, size_t size, int flags)
2177 {
2178 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2179 	int err;
2180 
2181 	/*
2182 	 * If this is a request for a synthetic attribute in the system.*
2183 	 * namespace use the generic infrastructure to resolve a handler
2184 	 * for it via sb->s_xattr.
2185 	 */
2186 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2187 		return generic_setxattr(dentry, name, value, size, flags);
2188 
2189 	err = shmem_xattr_validate(name);
2190 	if (err)
2191 		return err;
2192 
2193 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
2194 }
2195 
2196 static int shmem_removexattr(struct dentry *dentry, const char *name)
2197 {
2198 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2199 	int err;
2200 
2201 	/*
2202 	 * If this is a request for a synthetic attribute in the system.*
2203 	 * namespace use the generic infrastructure to resolve a handler
2204 	 * for it via sb->s_xattr.
2205 	 */
2206 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2207 		return generic_removexattr(dentry, name);
2208 
2209 	err = shmem_xattr_validate(name);
2210 	if (err)
2211 		return err;
2212 
2213 	return simple_xattr_remove(&info->xattrs, name);
2214 }
2215 
2216 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2217 {
2218 	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2219 	return simple_xattr_list(&info->xattrs, buffer, size);
2220 }
2221 #endif /* CONFIG_TMPFS_XATTR */
2222 
2223 static const struct inode_operations shmem_short_symlink_operations = {
2224 	.readlink	= generic_readlink,
2225 	.follow_link	= shmem_follow_short_symlink,
2226 #ifdef CONFIG_TMPFS_XATTR
2227 	.setxattr	= shmem_setxattr,
2228 	.getxattr	= shmem_getxattr,
2229 	.listxattr	= shmem_listxattr,
2230 	.removexattr	= shmem_removexattr,
2231 #endif
2232 };
2233 
2234 static const struct inode_operations shmem_symlink_inode_operations = {
2235 	.readlink	= generic_readlink,
2236 	.follow_link	= shmem_follow_link,
2237 	.put_link	= shmem_put_link,
2238 #ifdef CONFIG_TMPFS_XATTR
2239 	.setxattr	= shmem_setxattr,
2240 	.getxattr	= shmem_getxattr,
2241 	.listxattr	= shmem_listxattr,
2242 	.removexattr	= shmem_removexattr,
2243 #endif
2244 };
2245 
2246 static struct dentry *shmem_get_parent(struct dentry *child)
2247 {
2248 	return ERR_PTR(-ESTALE);
2249 }
2250 
2251 static int shmem_match(struct inode *ino, void *vfh)
2252 {
2253 	__u32 *fh = vfh;
2254 	__u64 inum = fh[2];
2255 	inum = (inum << 32) | fh[1];
2256 	return ino->i_ino == inum && fh[0] == ino->i_generation;
2257 }
2258 
2259 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2260 		struct fid *fid, int fh_len, int fh_type)
2261 {
2262 	struct inode *inode;
2263 	struct dentry *dentry = NULL;
2264 	u64 inum;
2265 
2266 	if (fh_len < 3)
2267 		return NULL;
2268 
2269 	inum = fid->raw[2];
2270 	inum = (inum << 32) | fid->raw[1];
2271 
2272 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2273 			shmem_match, fid->raw);
2274 	if (inode) {
2275 		dentry = d_find_alias(inode);
2276 		iput(inode);
2277 	}
2278 
2279 	return dentry;
2280 }
2281 
2282 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2283 				struct inode *parent)
2284 {
2285 	if (*len < 3) {
2286 		*len = 3;
2287 		return FILEID_INVALID;
2288 	}
2289 
2290 	if (inode_unhashed(inode)) {
2291 		/* Unfortunately insert_inode_hash is not idempotent,
2292 		 * so as we hash inodes here rather than at creation
2293 		 * time, we need a lock to ensure we only try
2294 		 * to do it once.
2295 		 */
2296 		static DEFINE_SPINLOCK(lock);
2297 		spin_lock(&lock);
2298 		if (inode_unhashed(inode))
2299 			__insert_inode_hash(inode,
2300 					    inode->i_ino + inode->i_generation);
2301 		spin_unlock(&lock);
2302 	}
2303 
2304 	fh[0] = inode->i_generation;
2305 	fh[1] = inode->i_ino;
2306 	fh[2] = ((__u64)inode->i_ino) >> 32;
2307 
2308 	*len = 3;
2309 	return 1;
2310 }
2311 
2312 static const struct export_operations shmem_export_ops = {
2313 	.get_parent     = shmem_get_parent,
2314 	.encode_fh      = shmem_encode_fh,
2315 	.fh_to_dentry	= shmem_fh_to_dentry,
2316 };
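
/*
 * Editorial note: the file handle produced by shmem_encode_fh() is
 * three 32-bit words: fh[0] = i_generation, fh[1] = low 32 bits of
 * i_ino, fh[2] = high 32 bits of i_ino.  shmem_fh_to_dentry()
 * reassembles the inode number as (fh[2] << 32) | fh[1], and
 * shmem_match() uses fh[0] to reject handles from a recycled inode.
 */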
2317 
2318 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2319 			       bool remount)
2320 {
2321 	char *this_char, *value, *rest;
2322 	struct mempolicy *mpol = NULL;
2323 	uid_t uid;
2324 	gid_t gid;
2325 
2326 	while (options != NULL) {
2327 		this_char = options;
2328 		for (;;) {
2329 			/*
2330 			 * NUL-terminate this option: unfortunately,
2331 			 * mount options form a comma-separated list,
2332 			 * but mpol's nodelist may also contain commas.
2333 			 */
2334 			options = strchr(options, ',');
2335 			if (options == NULL)
2336 				break;
2337 			options++;
2338 			if (!isdigit(*options)) {
2339 				options[-1] = '\0';
2340 				break;
2341 			}
2342 		}
2343 		if (!*this_char)
2344 			continue;
2345 		if ((value = strchr(this_char,'=')) != NULL) {
2346 			*value++ = 0;
2347 		} else {
2348 			printk(KERN_ERR
2349 			    "tmpfs: No value for mount option '%s'\n",
2350 			    this_char);
2351 			goto error;
2352 		}
2353 
2354 		if (!strcmp(this_char,"size")) {
2355 			unsigned long long size;
2356 			size = memparse(value,&rest);
2357 			if (*rest == '%') {
2358 				size <<= PAGE_SHIFT;
2359 				size *= totalram_pages;
2360 				do_div(size, 100);
2361 				rest++;
2362 			}
2363 			if (*rest)
2364 				goto bad_val;
2365 			sbinfo->max_blocks =
2366 				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2367 		} else if (!strcmp(this_char,"nr_blocks")) {
2368 			sbinfo->max_blocks = memparse(value, &rest);
2369 			if (*rest)
2370 				goto bad_val;
2371 		} else if (!strcmp(this_char,"nr_inodes")) {
2372 			sbinfo->max_inodes = memparse(value, &rest);
2373 			if (*rest)
2374 				goto bad_val;
2375 		} else if (!strcmp(this_char,"mode")) {
2376 			if (remount)
2377 				continue;
2378 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2379 			if (*rest)
2380 				goto bad_val;
2381 		} else if (!strcmp(this_char,"uid")) {
2382 			if (remount)
2383 				continue;
2384 			uid = simple_strtoul(value, &rest, 0);
2385 			if (*rest)
2386 				goto bad_val;
2387 			sbinfo->uid = make_kuid(current_user_ns(), uid);
2388 			if (!uid_valid(sbinfo->uid))
2389 				goto bad_val;
2390 		} else if (!strcmp(this_char,"gid")) {
2391 			if (remount)
2392 				continue;
2393 			gid = simple_strtoul(value, &rest, 0);
2394 			if (*rest)
2395 				goto bad_val;
2396 			sbinfo->gid = make_kgid(current_user_ns(), gid);
2397 			if (!gid_valid(sbinfo->gid))
2398 				goto bad_val;
2399 		} else if (!strcmp(this_char,"mpol")) {
2400 			mpol_put(mpol);
2401 			mpol = NULL;
2402 			if (mpol_parse_str(value, &mpol))
2403 				goto bad_val;
2404 		} else {
2405 			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2406 			       this_char);
2407 			goto error;
2408 		}
2409 	}
2410 	sbinfo->mpol = mpol;
2411 	return 0;
2412 
2413 bad_val:
2414 	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2415 	       value, this_char);
2416 error:
2417 	mpol_put(mpol);
2418 	return 1;
2419 
2420 }
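
/*
 * Editorial note: hypothetical option strings accepted by the parser
 * above, as they would appear on the mount command line:
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 *	mount -t tmpfs -o size=50%,uid=1000,gid=1000 tmpfs /mnt
 *	mount -t tmpfs -o mpol=interleave:0,2 tmpfs /mnt
 *
 * "size" takes the k/m/g suffixes of memparse() or a trailing '%' of
 * total RAM; the mpol nodelist may itself contain commas, which is why
 * the splitting loop only terminates an option at a comma that is not
 * followed by a digit.
 */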
2421 
2422 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2423 {
2424 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2425 	struct shmem_sb_info config = *sbinfo;
2426 	unsigned long inodes;
2427 	int error = -EINVAL;
2428 
2429 	config.mpol = NULL;
2430 	if (shmem_parse_options(data, &config, true))
2431 		return error;
2432 
2433 	spin_lock(&sbinfo->stat_lock);
2434 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2435 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2436 		goto out;
2437 	if (config.max_inodes < inodes)
2438 		goto out;
2439 	/*
2440 	 * Those tests disallow limited->unlimited while any are in use;
2441 	 * but we must separately disallow unlimited->limited, because
2442 	 * in that case we have no record of how much is already in use.
2443 	 */
2444 	if (config.max_blocks && !sbinfo->max_blocks)
2445 		goto out;
2446 	if (config.max_inodes && !sbinfo->max_inodes)
2447 		goto out;
2448 
2449 	error = 0;
2450 	sbinfo->max_blocks  = config.max_blocks;
2451 	sbinfo->max_inodes  = config.max_inodes;
2452 	sbinfo->free_inodes = config.max_inodes - inodes;
2453 
2454 	/*
2455 	 * Preserve previous mempolicy unless mpol remount option was specified.
2456 	 */
2457 	if (config.mpol) {
2458 		mpol_put(sbinfo->mpol);
2459 		sbinfo->mpol = config.mpol;	/* transfers initial ref */
2460 	}
2461 out:
2462 	spin_unlock(&sbinfo->stat_lock);
2463 	return error;
2464 }
2465 
2466 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2467 {
2468 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2469 
2470 	if (sbinfo->max_blocks != shmem_default_max_blocks())
2471 		seq_printf(seq, ",size=%luk",
2472 			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2473 	if (sbinfo->max_inodes != shmem_default_max_inodes())
2474 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2475 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2476 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2477 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
2478 		seq_printf(seq, ",uid=%u",
2479 				from_kuid_munged(&init_user_ns, sbinfo->uid));
2480 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
2481 		seq_printf(seq, ",gid=%u",
2482 				from_kgid_munged(&init_user_ns, sbinfo->gid));
2483 	shmem_show_mpol(seq, sbinfo->mpol);
2484 	return 0;
2485 }
2486 #endif /* CONFIG_TMPFS */
2487 
2488 static void shmem_put_super(struct super_block *sb)
2489 {
2490 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2491 
2492 	percpu_counter_destroy(&sbinfo->used_blocks);
2493 	mpol_put(sbinfo->mpol);
2494 	kfree(sbinfo);
2495 	sb->s_fs_info = NULL;
2496 }
2497 
2498 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2499 {
2500 	struct inode *inode;
2501 	struct shmem_sb_info *sbinfo;
2502 	int err = -ENOMEM;
2503 
2504 	/* Round up to L1_CACHE_BYTES to resist false sharing */
2505 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2506 				L1_CACHE_BYTES), GFP_KERNEL);
2507 	if (!sbinfo)
2508 		return -ENOMEM;
2509 
2510 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
2511 	sbinfo->uid = current_fsuid();
2512 	sbinfo->gid = current_fsgid();
2513 	sb->s_fs_info = sbinfo;
2514 
2515 #ifdef CONFIG_TMPFS
2516 	/*
2517 	 * By default we only allow half of the physical RAM per
2518 	 * tmpfs instance, limiting inodes to one per page of lowmem;
2519 	 * but the internal instance is left unlimited.
2520 	 */
2521 	if (!(sb->s_flags & MS_KERNMOUNT)) {
2522 		sbinfo->max_blocks = shmem_default_max_blocks();
2523 		sbinfo->max_inodes = shmem_default_max_inodes();
2524 		if (shmem_parse_options(data, sbinfo, false)) {
2525 			err = -EINVAL;
2526 			goto failed;
2527 		}
2528 	} else {
2529 		sb->s_flags |= MS_NOUSER;
2530 	}
2531 	sb->s_export_op = &shmem_export_ops;
2532 	sb->s_flags |= MS_NOSEC;
2533 #else
2534 	sb->s_flags |= MS_NOUSER;
2535 #endif
2536 
2537 	spin_lock_init(&sbinfo->stat_lock);
2538 	if (percpu_counter_init(&sbinfo->used_blocks, 0))
2539 		goto failed;
2540 	sbinfo->free_inodes = sbinfo->max_inodes;
2541 
2542 	sb->s_maxbytes = MAX_LFS_FILESIZE;
2543 	sb->s_blocksize = PAGE_CACHE_SIZE;
2544 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2545 	sb->s_magic = TMPFS_MAGIC;
2546 	sb->s_op = &shmem_ops;
2547 	sb->s_time_gran = 1;
2548 #ifdef CONFIG_TMPFS_XATTR
2549 	sb->s_xattr = shmem_xattr_handlers;
2550 #endif
2551 #ifdef CONFIG_TMPFS_POSIX_ACL
2552 	sb->s_flags |= MS_POSIXACL;
2553 #endif
2554 
2555 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2556 	if (!inode)
2557 		goto failed;
2558 	inode->i_uid = sbinfo->uid;
2559 	inode->i_gid = sbinfo->gid;
2560 	sb->s_root = d_make_root(inode);
2561 	if (!sb->s_root)
2562 		goto failed;
2563 	return 0;
2564 
2565 failed:
2566 	shmem_put_super(sb);
2567 	return err;
2568 }
2569 
2570 static struct kmem_cache *shmem_inode_cachep;
2571 
2572 static struct inode *shmem_alloc_inode(struct super_block *sb)
2573 {
2574 	struct shmem_inode_info *info;
2575 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2576 	if (!info)
2577 		return NULL;
2578 	return &info->vfs_inode;
2579 }
2580 
2581 static void shmem_destroy_callback(struct rcu_head *head)
2582 {
2583 	struct inode *inode = container_of(head, struct inode, i_rcu);
2584 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2585 }
2586 
2587 static void shmem_destroy_inode(struct inode *inode)
2588 {
2589 	if (S_ISREG(inode->i_mode))
2590 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2591 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
2592 }
2593 
2594 static void shmem_init_inode(void *foo)
2595 {
2596 	struct shmem_inode_info *info = foo;
2597 	inode_init_once(&info->vfs_inode);
2598 }
2599 
2600 static int shmem_init_inodecache(void)
2601 {
2602 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2603 				sizeof(struct shmem_inode_info),
2604 				0, SLAB_PANIC, shmem_init_inode);
2605 	return 0;
2606 }
2607 
2608 static void shmem_destroy_inodecache(void)
2609 {
2610 	kmem_cache_destroy(shmem_inode_cachep);
2611 }
2612 
2613 static const struct address_space_operations shmem_aops = {
2614 	.writepage	= shmem_writepage,
2615 	.set_page_dirty	= __set_page_dirty_no_writeback,
2616 #ifdef CONFIG_TMPFS
2617 	.write_begin	= shmem_write_begin,
2618 	.write_end	= shmem_write_end,
2619 #endif
2620 	.migratepage	= migrate_page,
2621 	.error_remove_page = generic_error_remove_page,
2622 };
2623 
2624 static const struct file_operations shmem_file_operations = {
2625 	.mmap		= shmem_mmap,
2626 #ifdef CONFIG_TMPFS
2627 	.llseek		= shmem_file_llseek,
2628 	.read		= do_sync_read,
2629 	.write		= do_sync_write,
2630 	.aio_read	= shmem_file_aio_read,
2631 	.aio_write	= generic_file_aio_write,
2632 	.fsync		= noop_fsync,
2633 	.splice_read	= shmem_file_splice_read,
2634 	.splice_write	= generic_file_splice_write,
2635 	.fallocate	= shmem_fallocate,
2636 #endif
2637 };
2638 
2639 static const struct inode_operations shmem_inode_operations = {
2640 	.setattr	= shmem_setattr,
2641 #ifdef CONFIG_TMPFS_XATTR
2642 	.setxattr	= shmem_setxattr,
2643 	.getxattr	= shmem_getxattr,
2644 	.listxattr	= shmem_listxattr,
2645 	.removexattr	= shmem_removexattr,
2646 	.set_acl	= simple_set_acl,
2647 #endif
2648 };
2649 
2650 static const struct inode_operations shmem_dir_inode_operations = {
2651 #ifdef CONFIG_TMPFS
2652 	.create		= shmem_create,
2653 	.lookup		= simple_lookup,
2654 	.link		= shmem_link,
2655 	.unlink		= shmem_unlink,
2656 	.symlink	= shmem_symlink,
2657 	.mkdir		= shmem_mkdir,
2658 	.rmdir		= shmem_rmdir,
2659 	.mknod		= shmem_mknod,
2660 	.rename		= shmem_rename,
2661 	.tmpfile	= shmem_tmpfile,
2662 #endif
2663 #ifdef CONFIG_TMPFS_XATTR
2664 	.setxattr	= shmem_setxattr,
2665 	.getxattr	= shmem_getxattr,
2666 	.listxattr	= shmem_listxattr,
2667 	.removexattr	= shmem_removexattr,
2668 #endif
2669 #ifdef CONFIG_TMPFS_POSIX_ACL
2670 	.setattr	= shmem_setattr,
2671 	.set_acl	= simple_set_acl,
2672 #endif
2673 };
2674 
2675 static const struct inode_operations shmem_special_inode_operations = {
2676 #ifdef CONFIG_TMPFS_XATTR
2677 	.setxattr	= shmem_setxattr,
2678 	.getxattr	= shmem_getxattr,
2679 	.listxattr	= shmem_listxattr,
2680 	.removexattr	= shmem_removexattr,
2681 #endif
2682 #ifdef CONFIG_TMPFS_POSIX_ACL
2683 	.setattr	= shmem_setattr,
2684 	.set_acl	= simple_set_acl,
2685 #endif
2686 };
2687 
2688 static const struct super_operations shmem_ops = {
2689 	.alloc_inode	= shmem_alloc_inode,
2690 	.destroy_inode	= shmem_destroy_inode,
2691 #ifdef CONFIG_TMPFS
2692 	.statfs		= shmem_statfs,
2693 	.remount_fs	= shmem_remount_fs,
2694 	.show_options	= shmem_show_options,
2695 #endif
2696 	.evict_inode	= shmem_evict_inode,
2697 	.drop_inode	= generic_delete_inode,
2698 	.put_super	= shmem_put_super,
2699 };
2700 
2701 static const struct vm_operations_struct shmem_vm_ops = {
2702 	.fault		= shmem_fault,
2703 	.map_pages	= filemap_map_pages,
2704 #ifdef CONFIG_NUMA
2705 	.set_policy     = shmem_set_policy,
2706 	.get_policy     = shmem_get_policy,
2707 #endif
2708 	.remap_pages	= generic_file_remap_pages,
2709 };
2710 
2711 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2712 	int flags, const char *dev_name, void *data)
2713 {
2714 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
2715 }
2716 
2717 static struct file_system_type shmem_fs_type = {
2718 	.owner		= THIS_MODULE,
2719 	.name		= "tmpfs",
2720 	.mount		= shmem_mount,
2721 	.kill_sb	= kill_litter_super,
2722 	.fs_flags	= FS_USERNS_MOUNT,
2723 };
2724 
2725 int __init shmem_init(void)
2726 {
2727 	int error;
2728 
2729 	/* If rootfs called this, don't re-init */
2730 	if (shmem_inode_cachep)
2731 		return 0;
2732 
2733 	error = bdi_init(&shmem_backing_dev_info);
2734 	if (error)
2735 		goto out4;
2736 
2737 	error = shmem_init_inodecache();
2738 	if (error)
2739 		goto out3;
2740 
2741 	error = register_filesystem(&shmem_fs_type);
2742 	if (error) {
2743 		printk(KERN_ERR "Could not register tmpfs\n");
2744 		goto out2;
2745 	}
2746 
2747 	shm_mnt = kern_mount(&shmem_fs_type);
2748 	if (IS_ERR(shm_mnt)) {
2749 		error = PTR_ERR(shm_mnt);
2750 		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2751 		goto out1;
2752 	}
2753 	return 0;
2754 
2755 out1:
2756 	unregister_filesystem(&shmem_fs_type);
2757 out2:
2758 	shmem_destroy_inodecache();
2759 out3:
2760 	bdi_destroy(&shmem_backing_dev_info);
2761 out4:
2762 	shm_mnt = ERR_PTR(error);
2763 	return error;
2764 }
2765 
2766 #else /* !CONFIG_SHMEM */
2767 
2768 /*
2769  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2770  *
2771  * This is intended for small systems where the benefits of the full
2772  * shmem code (swap-backed and resource-limited) are outweighed by
2773  * its complexity. On systems without swap this code should be
2774  * effectively equivalent, but much lighter weight.
2775  */
2776 
2777 static struct file_system_type shmem_fs_type = {
2778 	.name		= "tmpfs",
2779 	.mount		= ramfs_mount,
2780 	.kill_sb	= kill_litter_super,
2781 	.fs_flags	= FS_USERNS_MOUNT,
2782 };
2783 
2784 int __init shmem_init(void)
2785 {
2786 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2787 
2788 	shm_mnt = kern_mount(&shmem_fs_type);
2789 	BUG_ON(IS_ERR(shm_mnt));
2790 
2791 	return 0;
2792 }
2793 
2794 int shmem_unuse(swp_entry_t swap, struct page *page)
2795 {
2796 	return 0;
2797 }
2798 
2799 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2800 {
2801 	return 0;
2802 }
2803 
2804 void shmem_unlock_mapping(struct address_space *mapping)
2805 {
2806 }
2807 
2808 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2809 {
2810 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2811 }
2812 EXPORT_SYMBOL_GPL(shmem_truncate_range);
2813 
2814 #define shmem_vm_ops				generic_file_vm_ops
2815 #define shmem_file_operations			ramfs_file_operations
2816 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
2817 #define shmem_acct_size(flags, size)		0
2818 #define shmem_unacct_size(flags, size)		do {} while (0)
2819 
2820 #endif /* CONFIG_SHMEM */
2821 
2822 /* common code */
2823 
2824 static struct dentry_operations anon_ops = {
2825 	.d_dname = simple_dname
2826 };
2827 
2828 static struct file *__shmem_file_setup(const char *name, loff_t size,
2829 				       unsigned long flags, unsigned int i_flags)
2830 {
2831 	struct file *res;
2832 	struct inode *inode;
2833 	struct path path;
2834 	struct super_block *sb;
2835 	struct qstr this;
2836 
2837 	if (IS_ERR(shm_mnt))
2838 		return ERR_CAST(shm_mnt);
2839 
2840 	if (size < 0 || size > MAX_LFS_FILESIZE)
2841 		return ERR_PTR(-EINVAL);
2842 
2843 	if (shmem_acct_size(flags, size))
2844 		return ERR_PTR(-ENOMEM);
2845 
2846 	res = ERR_PTR(-ENOMEM);
2847 	this.name = name;
2848 	this.len = strlen(name);
2849 	this.hash = 0; /* will go */
2850 	sb = shm_mnt->mnt_sb;
2851 	path.dentry = d_alloc_pseudo(sb, &this);
2852 	if (!path.dentry)
2853 		goto put_memory;
2854 	d_set_d_op(path.dentry, &anon_ops);
2855 	path.mnt = mntget(shm_mnt);
2856 
2857 	res = ERR_PTR(-ENOSPC);
2858 	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2859 	if (!inode)
2860 		goto put_dentry;
2861 
2862 	inode->i_flags |= i_flags;
2863 	d_instantiate(path.dentry, inode);
2864 	inode->i_size = size;
2865 	clear_nlink(inode);	/* It is unlinked */
2866 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
2867 	if (IS_ERR(res))
2868 		goto put_dentry;
2869 
2870 	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2871 		  &shmem_file_operations);
2872 	if (IS_ERR(res))
2873 		goto put_dentry;
2874 
2875 	return res;
2876 
2877 put_dentry:
2878 	path_put(&path);
2879 put_memory:
2880 	shmem_unacct_size(flags, size);
2881 	return res;
2882 }
2883 
2884 /**
2885  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
2886  * 	kernel internal.  There will be NO LSM permission checks against the
2887  * 	underlying inode.  So users of this interface must do LSM checks at a
2888  * 	higher layer.  The one user is the big_key implementation.  LSM checks
2889  * 	are provided at the key level rather than the inode level.
2890  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2891  * @size: size to be set for the file
2892  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2893  */
2894 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
2895 {
2896 	return __shmem_file_setup(name, size, flags, S_PRIVATE);
2897 }
2898 
2899 /**
2900  * shmem_file_setup - get an unlinked file living in tmpfs
2901  * @name: name for dentry (to be seen in /proc/<pid>/maps)
2902  * @size: size to be set for the file
2903  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2904  */
2905 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2906 {
2907 	return __shmem_file_setup(name, size, flags, 0);
2908 }
2909 EXPORT_SYMBOL_GPL(shmem_file_setup);
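
/*
 * Editorial sketch (not part of shmem.c): typical in-kernel use of
 * shmem_file_setup().  The name and caller are hypothetical; real users
 * (e.g. SysV shared memory) attach the returned unlinked, size-set file
 * to their own object and release it with fput().
 */
#if 0	/* illustration only */
static struct file *example_shmem_object(loff_t size)
{
	struct file *filp = shmem_file_setup("example", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return filp;	/* ERR_PTR from __shmem_file_setup() */
	/* ... use filp->f_mapping as swap-backed storage ... */
	return filp;
}
#endif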
2910 
2911 /**
2912  * shmem_zero_setup - setup a shared anonymous mapping
2913  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2914  */
2915 int shmem_zero_setup(struct vm_area_struct *vma)
2916 {
2917 	struct file *file;
2918 	loff_t size = vma->vm_end - vma->vm_start;
2919 
2920 	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2921 	if (IS_ERR(file))
2922 		return PTR_ERR(file);
2923 
2924 	if (vma->vm_file)
2925 		fput(vma->vm_file);
2926 	vma->vm_file = file;
2927 	vma->vm_ops = &shmem_vm_ops;
2928 	return 0;
2929 }
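
/*
 * Editorial sketch (not part of shmem.c): shmem_zero_setup() is what
 * backs a MAP_SHARED | MAP_ANONYMOUS mapping, so the hypothetical
 * userspace below ends up with its vma->vm_file pointing at the
 * "dev/zero" shmem file created above.
 */
#if 0	/* userspace illustration, not kernel code */
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* first touch faults in a shmem page */
	munmap(p, 1 << 20);
	return 0;
}
#endif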
2930 
2931 /**
2932  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2933  * @mapping:	the page's address_space
2934  * @index:	the page index
2935  * @gfp:	the page allocator flags to use if allocating
2936  *
2937  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2938  * with any new page allocations done using the specified allocation flags.
2939  * But read_cache_page_gfp() uses the ->readpage() method: which does not
2940  * suit tmpfs, since it may have pages in swapcache, and needs to find those
2941  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2942  *
2943  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2944  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2945  */
2946 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2947 					 pgoff_t index, gfp_t gfp)
2948 {
2949 #ifdef CONFIG_SHMEM
2950 	struct inode *inode = mapping->host;
2951 	struct page *page;
2952 	int error;
2953 
2954 	BUG_ON(mapping->a_ops != &shmem_aops);
2955 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2956 	if (error)
2957 		page = ERR_PTR(error);
2958 	else
2959 		unlock_page(page);
2960 	return page;
2961 #else
2962 	/*
2963 	 * The tiny !SHMEM case uses ramfs without swap
2964 	 */
2965 	return read_cache_page_gfp(mapping, index, gfp);
2966 #endif
2967 }
2968 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
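
/*
 * Editorial sketch (not part of shmem.c): the i915-style call pattern
 * described above, reading object pages with softer allocation flags so
 * a failed allocation falls back rather than OOMing.  Names here are
 * hypothetical.
 */
#if 0	/* illustration only */
static struct page *example_get_object_page(struct address_space *mapping,
					    pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif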
2969