/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_pages)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
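	/* alloced, swapped and used_blocks are now consistent again */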
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap. So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there. Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction. But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to. If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible? I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive. Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = file_inode(filp);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem? Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;
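			/* plen: number of bytes of valid data in this last page */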

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
1930 */ 1931 static int 1932 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 1933 { 1934 struct inode *inode; 1935 int error = -ENOSPC; 1936 1937 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1938 if (inode) { 1939 #ifdef CONFIG_TMPFS_POSIX_ACL 1940 error = generic_acl_init(inode, dir); 1941 if (error) { 1942 iput(inode); 1943 return error; 1944 } 1945 #endif 1946 error = security_inode_init_security(inode, dir, 1947 &dentry->d_name, 1948 shmem_initxattrs, NULL); 1949 if (error) { 1950 if (error != -EOPNOTSUPP) { 1951 iput(inode); 1952 return error; 1953 } 1954 } 1955 1956 error = 0; 1957 dir->i_size += BOGO_DIRENT_SIZE; 1958 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1959 d_instantiate(dentry, inode); 1960 dget(dentry); /* Extra count - pin the dentry in core */ 1961 } 1962 return error; 1963 } 1964 1965 static int 1966 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 1967 { 1968 struct inode *inode; 1969 int error = -ENOSPC; 1970 1971 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 1972 if (inode) { 1973 error = security_inode_init_security(inode, dir, 1974 NULL, 1975 shmem_initxattrs, NULL); 1976 if (error) { 1977 if (error != -EOPNOTSUPP) { 1978 iput(inode); 1979 return error; 1980 } 1981 } 1982 #ifdef CONFIG_TMPFS_POSIX_ACL 1983 error = generic_acl_init(inode, dir); 1984 if (error) { 1985 iput(inode); 1986 return error; 1987 } 1988 #else 1989 error = 0; 1990 #endif 1991 d_tmpfile(dentry, inode); 1992 } 1993 return error; 1994 } 1995 1996 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1997 { 1998 int error; 1999 2000 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 2001 return error; 2002 inc_nlink(dir); 2003 return 0; 2004 } 2005 2006 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2007 bool excl) 2008 { 2009 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 2010 } 2011 2012 /* 2013 * Link a file.. 2014 */ 2015 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 2016 { 2017 struct inode *inode = old_dentry->d_inode; 2018 int ret; 2019 2020 /* 2021 * No ordinary (disk based) filesystem counts links as inodes; 2022 * but each new link needs a new dentry, pinning lowmem, and 2023 * tmpfs dentries cannot be pruned until they are unlinked. 
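	 *
	 * Illustrative note (not in the original comment): every link(2) on
	 * tmpfs therefore consumes one entry of the mount's nr_inodes pool
	 * via shmem_reserve_inode() below, even though no new inode is
	 * allocated, and e.g. "ln /dev/shm/a /dev/shm/b" can fail with
	 * ENOSPC once that pool is exhausted; shmem_unlink() returns the
	 * entry when the extra link goes away.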
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
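	 *
	 * Illustrative example (not in the original comment): a request for
	 * "system.posix_acl_access" is resolved through the generic_acl
	 * handlers registered in shmem_xattr_handlers[], while names such as
	 * "security.selinux" or "trusted.foo" pass shmem_xattr_validate()
	 * and are served from the simple_xattr list in shmem_inode_info.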
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_remove(&info->xattrs, name);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			goto error;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;

}

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks = config.max_blocks;
	sbinfo->max_inodes = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
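	 *
	 * Illustrative figures (not in the original comment): with 4GiB of
	 * RAM and 4KiB pages, shmem_default_max_blocks() allows 524288
	 * blocks (2GiB) and shmem_default_max_inodes() at most 524288
	 * inodes; both defaults can be overridden at mount time, e.g.
	 *	mount -t tmpfs -o size=1G,nr_inodes=10k,mode=1777 tmpfs /mnt
	 * where "size" also accepts a percentage such as size=50%.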
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
	.remap_pages	= generic_file_remap_pages,
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
				dentry->d_name.name);
}

static struct dentry_operations anon_ops = {
	.d_dname = shmem_dname
};

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	struct file *res;
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return ERR_CAST(shm_mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	res = ERR_PTR(-ENOMEM);
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	sb = shm_mnt->mnt_sb;
	path.dentry = d_alloc_pseudo(sb, &this);
	if (!path.dentry)
		goto put_memory;
	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(shm_mnt);

	res = ERR_PTR(-ENOSPC);
	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (IS_ERR(res))
		goto put_dentry;

	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			 &shmem_file_operations);
	if (IS_ERR(res))
		goto put_dentry;

	return res;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return res;
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);