/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
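
/*
 * For illustration, assuming the common 4KB page size: BLOCKS_PER_PAGE is
 * 4096/512 = 8, matching the 512-byte units of inode->i_blocks, while
 * VM_ACCT(16000) is 4 pages, i.e. the size rounded up to 16KB before being
 * charged against the memory overcommit limits.
 */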

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
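
/*
 * Worked example of the defaults above (illustrative): on a machine with
 * 1GB of RAM and 4KB pages, totalram_pages is 262144, so an unconfigured
 * tmpfs mount is limited to 131072 pages (512MB) and, with no highmem,
 * to 131072 inodes.
 */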
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
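
/*
 * Worked example for shmem_recalc_inode() (illustration only): if an inode
 * had allocated 10 pages but reclaim has since dropped two clean hole pages,
 * leaving 6 in pagecache and 2 on swap, then freed == 10 - 2 - 6 == 2, and
 * those two blocks are returned to the superblock counter and to the
 * overcommit accounting.
 */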

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
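 *
 * Illustrative example, assuming 4KB pages: punching bytes 1000..9999
 * gives start == 1, end == 2, partial_start == 1000 and partial_end == 1808,
 * so page 1 is removed outright, page 0 is zeroed from offset 1000 to the
 * end, and page 2 is zeroed up to offset 1808 by the partial-page handling
 * below.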
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
						pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
						pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm.
 * If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
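	 *
	 * Summary of the labels below: "trunc" backs out a page that was
	 * already added to the page cache and its inode accounting, "decused"
	 * gives back the used_blocks count, "unacct" undoes shmem_acct_block(),
	 * and -EEXIST (a raced swap entry or radix_tree_insert) retries at
	 * "repeat".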
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
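
/*
 * Illustrative note: mmap(MAP_SHARED) of a tmpfs file goes through
 * shmem_mmap() above, which simply installs shmem_vm_ops; page faults on
 * the mapping are then served by shmem_fault(), i.e. by
 * shmem_getpage(..., SGP_CACHE, ...) rather than by a ->readpage method.
 */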

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}
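
/*
 * Userspace sketch (illustrative): fallocate(fd, 0, 0, len) preallocates
 * and charges the whole range up front, while fallocate(fd,
 * FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len) frees the pages
 * backing that range; the VFS only allows PUNCH_HOLE together with
 * KEEP_SIZE, so punching a hole never changes i_size.
 */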

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}
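
/*
 * Illustrative userspace sketch (assumed path, not part of this file):
 * shmem_xattr_validate() only passes "security.*" and "trusted.*" names
 * through to the simple_xattr store (a bare prefix is rejected with
 * -EINVAL); "system.*" is routed to the generic handlers by the methods
 * below, and any other namespace, e.g. "user.*", comes back as EOPNOTSUPP:
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/xattr.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		close(open("/dev/shm/xattr-demo", O_CREAT | O_WRONLY, 0600));
 *		if (setxattr("/dev/shm/xattr-demo", "user.note", "x", 1, 0))
 *			printf("user.note:    %s\n", strerror(errno));
 *		if (setxattr("/dev/shm/xattr-demo", "trusted.note", "x", 1, 0))
 *			printf("trusted.note: %s\n", strerror(errno));
 *		unlink("/dev/shm/xattr-demo");
 *		return 0;
 *	}
 *
 * (trusted.* additionally requires CAP_SYS_ADMIN at the VFS level, so the
 * second call may still fail with EPERM for an unprivileged caller.)
 */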

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_remove(&info->xattrs, name);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;

}
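
/*
 * Illustrative sketch (not part of this file): the option string parsed
 * above is the same comma-separated list userspace hands to mount(2) as
 * the "data" argument; "/mnt/tmp" below is only an assumed mountpoint.
 * "size=" goes through memparse(), so k/m/g suffixes work, and a trailing
 * '%' scales the value against totalram_pages:
 *
 *	#include <sys/mount.h>
 *
 *	int mount_small_tmpfs(void)
 *	{
 *		return mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *			     "size=64m,nr_inodes=4096,mode=1777,uid=1000,gid=1000");
 *	}
 *
 * The equivalent shell form is
 *	mount -t tmpfs -o size=64m,nr_inodes=4096,mode=1777 tmpfs /mnt/tmp
 */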

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
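
/*
 * Illustrative sketch (assumed mountpoint, not part of this file):
 * shmem_remount_fs() above refuses to shrink a limit below what is already
 * in use, and refuses limited/unlimited transitions while usage is
 * unknown, so a remount can fail with EINVAL even though the option string
 * itself parses cleanly:
 *
 *	#include <sys/mount.h>
 *
 *	int shrink_tmpfs(void)
 *	{
 *		return mount(NULL, "/mnt/tmp", NULL, MS_REMOUNT, "size=1m");
 *	}
 *
 * The call above returns -1 with errno == EINVAL if more than 1m of the
 * instance is already allocated.
 */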

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
	.remap_pages	= generic_file_remap_pages,
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
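
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * kernel user that wants an unlinked, swappable buffer pairs
 * shmem_file_setup() with fput(), much as shmem_zero_setup() above does
 * for /dev/zero mappings; "demo-buffer" and the 1MB size are assumed
 * values:
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("demo-buffer", 1 << 20, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...use filp->f_mapping or mmap it into a vma...
 *	fput(filp);
 *
 * The name only shows up in /proc/<pid>/maps; it is not a path that can
 * be looked up.
 */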

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
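
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * driver holding a shmem-backed mapping reads one page roughly the way
 * the i915/ttm callers mentioned above do, relaxing the gfp mask so an
 * allocation failure is reported to the caller instead of invoking the
 * OOM killer.  A real caller would use the page before dropping its
 * reference:
 *
 *	static int read_one(struct address_space *mapping, pgoff_t index)
 *	{
 *		gfp_t gfp = mapping_gfp_mask(mapping) |
 *			    __GFP_NORETRY | __GFP_NOWARN;
 *		struct page *page;
 *
 *		page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *		if (IS_ERR(page))
 *			return PTR_ERR(page);
 *		...use the page contents...
 *		page_cache_release(page);
 *		return 0;
 *	}
 */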