/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
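/*
 * A quick worked example of the accounting units above (illustrative
 * only, assuming the common PAGE_CACHE_SIZE of 4096 bytes):
 * BLOCKS_PER_PAGE is 4096/512 = 8, matching the 512-byte units of
 * inode->i_blocks; and VM_ACCT(5000) rounds 5000 up to 8192 before
 * shifting by PAGE_SHIFT, so a 5000-byte object is accounted as 2 pages.
 */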
/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}
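/*
 * Illustrative use of shmem_getpage() (a sketch, mirroring the patterns
 * of the read and write paths later in this file): pass the sgp_type
 * matching the caller's intent, and on success receive a locked,
 * referenced page back through *pagep:
 *
 *	struct page *page = NULL;
 *	int error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
 *	if (!error && page) {
 *		... use the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 *
 * Note that SGP_READ may legitimately return success with *pagep still
 * NULL when reading a hole.
 */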
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
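/*
 * The "spinlock held" requirement above is info->lock: a sketch of the
 * calling convention, as used by the swap and allocation paths later in
 * this file (illustrative only):
 *
 *	spin_lock(&info->lock);
 *	info->swapped--;		// or info->alloced++, etc.
 *	shmem_recalc_inode(inode);
 *	spin_unlock(&info->lock);
 */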
/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}
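/*
 * A sketch of how the race check above gets used (illustrative only;
 * compare the swapin path of shmem_getpage_gfp() below). Swap entries
 * are stored in the radix tree as exceptional entries, converted with
 * swp_to_radix_entry()/radix_to_swp_entry(), so confirming that the
 * slot still holds our entry tells us no racing thread swapped the
 * page back in:
 *
 *	lock_page(page);
 *	if (!PageSwapCache(page) || page_private(page) != swap.val ||
 *	    !shmem_confirm_swap(mapping, index, swap)) {
 *		// raced: drop this page and retry the lookup
 *	}
 */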
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_pages)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}
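/*
 * A worked example of the offset arithmetic above (illustrative only,
 * assuming 4096-byte pages): punching lstart=1000, lend=8191 gives
 * start=1 and end=2, so whole page 1 is removed; partial_start=1000
 * zeroes bytes 1000..4095 of page 0; and partial_end=0 since lend+1
 * is page-aligned. Truncation to EOF passes lend=-1, so end wraps to
 * a huge unsigned value and the loops run to the last page present.
 */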
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}
/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}
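/*
 * For context (a sketch, not a definitive call chain): shmem_unuse() is
 * the swapoff-side entry point. When try_to_unuse() in mm/swapfile.c
 * meets a swap page that belongs to tmpfs rather than to anonymous
 * memory, it passes the locked page here as
 *
 *	shmem_unuse(entry, page);
 *
 * and both the page lock and the caller's page reference are consumed:
 * note the unconditional unlock_page()/page_cache_release() above.
 */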
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap. So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there. Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction. But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
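/*
 * Why bias interleave by inode number? A sketch of the effect
 * (illustrative only): with MPOL_INTERLEAVE, the node chosen for a
 * file page is derived from its offset within the pseudo vma, so
 * without the bias page 0 of every file would land on the same node.
 * Adding i_ino staggers the starting node per inode; e.g. on 4 nodes,
 * two files with consecutive inode numbers begin their interleave one
 * node apart.
 */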
/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to. If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible? I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive. Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}
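/*
 * The caller-side pattern, as used by both swapin paths in this file
 * (a sketch; see shmem_getpage_gfp() and shmem_unuse_inode()):
 *
 *	if (shmem_should_replace_page(page, gfp)) {
 *		error = shmem_replace_page(&page, gfp, info, index);
 *		if (error)
 *			goto failed;
 *	}
 *
 * On success *pagep has been switched to the new, suitably-placed page;
 * the two page_cache_release(oldpage) calls above drop the swapcache
 * reference and the caller's reference to the old page.
 */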
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and free the swap entry,
 * since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
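/*
 * A sketch of the shmem_lock() callers (for orientation; the real call
 * sites live in ipc/shm.c): SysV SHM_LOCK and SHM_UNLOCK pin or unpin
 * the whole segment against swap, roughly
 *
 *	shmem_lock(shp->shm_file, 1, user);	// SHM_LOCK
 *	shmem_lock(shp->shm_file, 0, user);	// SHM_UNLOCK
 *
 * with SHM_UNLOCK then calling shmem_unlock_mapping() above to drain
 * the Unevictable LRU.
 */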
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
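/*
 * The ->write_begin/->write_end contract as tmpfs implements it (a
 * sketch for orientation): the generic write path calls write_begin to
 * obtain a locked page via SGP_WRITE (which may be !Uptodate if the
 * write is expected to fill it), copies the user data in, then calls
 * write_end, which zeroes any unwritten ends of a new page, marks it
 * Uptodate and dirty, and unlocks and releases it.
 */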
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = file_inode(filp);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem? Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}
/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0 && offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return offset;
}
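/*
 * From userspace this gives the usual lseek(2) hole semantics (an
 * illustrative sketch, assuming a sparse tmpfs file with data only in
 * its second 4096-byte page):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// -> 4096
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// -> 8192
 *
 * SEEK_DATA past the last data returns -1 with errno ENXIO, as the
 * -ENXIO cases above show; note that holes are reported at page
 * granularity only.
 */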
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}
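/*
 * The two fallocate(2) modes handled above, from the caller's side (an
 * illustrative sketch):
 *
 *	// preallocate: subsequent writes to [0, 1MB) won't fail -ENOSPC
 *	fallocate(fd, 0, 0, 1 << 20);
 *
 *	// punch a hole: frees the pages backing [off, off+len)
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE; that
 * combination is checked by the VFS before this function is reached.
 */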
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
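/*
 * Illustrative sketch (not part of this file): the link-as-inode accounting
 * above is visible through statvfs(3).  Each extra hard link on tmpfs takes
 * one reservation from the nr_inodes pool, and shmem_unlink() gives it back
 * when the link count drops.  Paths below are arbitrary.
 *
 *	#include <sys/statvfs.h>
 *	#include <unistd.h>
 *
 *	struct statvfs before, after;
 *
 *	statvfs("/dev/shm", &before);
 *	link("/dev/shm/file", "/dev/shm/file2");
 *	statvfs("/dev/shm", &after);
 *	// after.f_ffree == before.f_ffree - 1, although no new inode was
 *	// really allocated: only a reservation was taken (see the comment
 *	// in shmem_link() above).
 */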
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */
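/*
 * Illustrative sketch (not part of this file): with an LSM such as SELinux
 * active, shmem_initxattrs() below is what makes a freshly created tmpfs
 * file carry its security label, queryable from userspace.  The path is
 * arbitrary.
 *
 *	#include <sys/xattr.h>
 *
 *	char label[256];
 *	ssize_t n = getxattr("/dev/shm/file", "security.selinux",
 *			     label, sizeof(label));
 *	// n > 0 and label holds the context the LSM supplied at
 *	// inode-creation time, stored via simple_xattr_list_add().
 */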
/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
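/*
 * Illustrative sketch (not part of this file): what shmem_xattr_validate()
 * above means for callers.  The attribute names are examples only.
 *
 *	shmem_xattr_validate("security.selinux");	// 0: allowed
 *	shmem_xattr_validate("trusted.overlay.opaque");	// 0: allowed
 *	shmem_xattr_validate("trusted.");		// -EINVAL: empty suffix
 *	shmem_xattr_validate("user.comment");		// -EOPNOTSUPP: no
 *							// user.* here
 *	// system.* never reaches it: the callers branch to the generic
 *	// sb->s_xattr handlers (the POSIX ACL handlers) first.
 */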
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_remove(&info->xattrs, name);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
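/*
 * Illustrative sketch (not part of this file): the three-word handle laid
 * out by shmem_encode_fh() above is what userspace sees through
 * name_to_handle_at(2).  Path and variable names are arbitrary.
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct file_handle *fhp = malloc(sizeof(*fhp) + MAX_HANDLE_SZ);
 *	int mount_id;
 *
 *	fhp->handle_bytes = MAX_HANDLE_SZ;
 *	name_to_handle_at(AT_FDCWD, "/dev/shm/file", fhp, &mount_id, 0);
 *	// fhp->f_handle now holds { i_generation, low half of i_ino,
 *	// high half of i_ino }; open_by_handle_at(2) comes back in via
 *	// shmem_fh_to_dentry(), which can only succeed while the inode
 *	// is still in the inode cache (there is no disk to reload from).
 */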
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			goto error;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;
}
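/*
 * Illustrative sketch (not part of this file): the option string parsed
 * above, as it might arrive from userspace.  The mount point is arbitrary;
 * "size=50%" is converted to a page count against totalram_pages.
 *
 *	#include <sys/mount.h>
 *
 *	mount("tmpfs", "/mnt/scratch", "tmpfs", MS_NOSUID | MS_NODEV,
 *	      "size=50%,nr_inodes=100k,mode=1777,uid=1000,gid=1000");
 *
 *	// "mpol=interleave:0-3" would also be accepted here: the commas in
 *	// a nodelist are the reason for the isdigit() continuation test
 *	// in the tokenizing loop above.
 */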
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks = config.max_blocks;
	sbinfo->max_inodes = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */
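/*
 * Illustrative sketch (not part of this file): growing and shrinking a
 * live tmpfs with the remount path above.  The mount point is arbitrary.
 *
 *	#include <sys/mount.h>
 *
 *	// Grow the instance: always allowed.
 *	mount(NULL, "/mnt/scratch", NULL, MS_REMOUNT, "size=2g");
 *
 *	// Shrink: only succeeds if current usage still fits.
 *	mount(NULL, "/mnt/scratch", NULL, MS_REMOUNT, "size=64m");
 *
 *	// Switching unlimited->limited is always rejected (no usage record
 *	// was kept); limited->unlimited is rejected while pages or inodes
 *	// are in use.  Both fail with EINVAL.
 */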
static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};
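/*
 * Illustrative sketch (not part of this file): the .splice_read hook above
 * is what sendfile(2) from a tmpfs file goes through, moving page
 * references into the pipe instead of copying data.  The descriptors and
 * length are arbitrary.
 *
 *	#include <sys/sendfile.h>
 *
 *	// out_fd: a connected socket; in_fd: an open tmpfs file.
 *	off_t off = 0;
 *	ssize_t sent = sendfile(out_fd, in_fd, &off, 1 << 20);
 */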
static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
	.remap_pages	= generic_file_remap_pages,
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */
/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops			generic_file_vm_ops
#define shmem_file_operations		ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)	0
#define shmem_unacct_size(flags, size)	do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
				dentry->d_name.name);
}

static struct dentry_operations anon_ops = {
	.d_dname = shmem_dname
};

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct file *res;
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return ERR_CAST(shm_mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	res = ERR_PTR(-ENOMEM);
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	sb = shm_mnt->mnt_sb;
	path.dentry = d_alloc_pseudo(sb, &this);
	if (!path.dentry)
		goto put_memory;
	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(shm_mnt);

	res = ERR_PTR(-ENOSPC);
	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (IS_ERR(res))
		goto put_dentry;
#endif

	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (IS_ERR(res))
		goto put_dentry;

	return res;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return res;
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
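/*
 * Illustrative sketch (not part of this file): a kernel-side user of
 * shmem_file_setup().  A driver wanting an unlinked, swappable buffer
 * might do something like the following; the name is only cosmetic (it
 * shows up in /proc/<pid>/maps) and "my-buffer" is arbitrary.
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("my-buffer", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	// ... use filp->f_mapping to get/put pages ...
 *	fput(filp);	// last reference gone: pages are freed
 */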
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
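/*
 * Illustrative sketch (not part of this file): how a GPU-style driver
 * might pull object pages the way the i915 usage described above does.
 * The obj->filp member and page count are assumptions for the example.
 *
 *	struct address_space *mapping = obj->filp->f_mapping;
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *	int i;
 *
 *	for (i = 0; i < obj->nr_pages; i++) {
 *		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *		if (IS_ERR(page))
 *			return PTR_ERR(page);	// e.g. -ENOMEM under
 *						// pressure: caller retries
 *		obj->pages[i] = page;		// returned with a reference
 *						// held and the page unlocked
 *	}
 */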