/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
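 * (shmem_fallocate points i_private at a struct shmem_falloc living on
 * its own stack, for just the duration of the call.)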
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

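	/* accounting succeeded: update the inode's page counts under info->lock */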
	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	xa_lock_irq(&mapping->i_pages);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->i_pages,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->i_pages,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->i_pages, index, page);
	} else {
		error = shmem_replace_entry(mapping, index, expected, page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		xa_unlock_irq(&mapping->i_pages);
	} else {
		page->mapping = NULL;
		xa_unlock_irq(&mapping->i_pages);
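		/* error path: drop the references taken by page_ref_add() above */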
		page_ref_sub(page, nr);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	xa_lock_irq(&mapping->i_pages);
	old = radix_tree_delete_item(&mapping->i_pages, index, radswap);
	xa_unlock_irq(&mapping->i_pages);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
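 *
 * Swap entries are stored as value entries (see xa_is_value()), so they
 * can be counted under rcu_read_lock() without taking page references.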
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
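	 * Otherwise, count the swap entries in just the range of offsets
	 * that the vma covers.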
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
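				/* truncate only while the page still belongs to this mapping */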
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial THP truncate due to 'start' falling
				 * in the middle of the THP: no need to look
				 * at these pages again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);

	if (is_huge_enabled(sb_info))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrink_list in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long found = -1;
	unsigned int checked = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (entry == item) {
			found = iter.index;
			break;
		}
		checked++;
		if ((checked % 4096) != 0)
			continue;
		slot = radix_tree_iter_resume(slot, &iter);
		cond_resched_rcu();
	}

	rcu_read_unlock();
	return found;
}

/*
Dickins * If swap found in inode, free it and move page from swapcache to filecache. 113546f65ec1SHugh Dickins */ 113641ffe5d5SHugh Dickins static int shmem_unuse_inode(struct shmem_inode_info *info, 1137bde05d1cSHugh Dickins swp_entry_t swap, struct page **pagep) 11381da177e4SLinus Torvalds { 1139285b2c4fSHugh Dickins struct address_space *mapping = info->vfs_inode.i_mapping; 114046f65ec1SHugh Dickins void *radswap; 114141ffe5d5SHugh Dickins pgoff_t index; 1142bde05d1cSHugh Dickins gfp_t gfp; 1143bde05d1cSHugh Dickins int error = 0; 11441da177e4SLinus Torvalds 114546f65ec1SHugh Dickins radswap = swp_to_radix_entry(swap); 1146b93b0163SMatthew Wilcox index = find_swap_entry(&mapping->i_pages, radswap); 114746f65ec1SHugh Dickins if (index == -1) 114800501b53SJohannes Weiner return -EAGAIN; /* tell shmem_unuse we found nothing */ 11492e0e26c7SHugh Dickins 11501b1b32f2SHugh Dickins /* 11511b1b32f2SHugh Dickins * Move _head_ to start search for next from here. 11521f895f75SAl Viro * But be careful: shmem_evict_inode checks list_empty without taking 11531b1b32f2SHugh Dickins * mutex, and there's an instant in list_move_tail when info->swaplist 1154285b2c4fSHugh Dickins * would appear empty, if it were the only one on shmem_swaplist. 11551b1b32f2SHugh Dickins */ 11561b1b32f2SHugh Dickins if (shmem_swaplist.next != &info->swaplist) 11572e0e26c7SHugh Dickins list_move_tail(&shmem_swaplist, &info->swaplist); 11582e0e26c7SHugh Dickins 1159bde05d1cSHugh Dickins gfp = mapping_gfp_mask(mapping); 1160bde05d1cSHugh Dickins if (shmem_should_replace_page(*pagep, gfp)) { 1161bde05d1cSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1162bde05d1cSHugh Dickins error = shmem_replace_page(pagep, gfp, info, index); 1163bde05d1cSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1164bde05d1cSHugh Dickins /* 1165bde05d1cSHugh Dickins * We needed to drop mutex to make that restrictive page 11660142ef6cSHugh Dickins * allocation, but the inode might have been freed while we 11670142ef6cSHugh Dickins * dropped it: although a racing shmem_evict_inode() cannot 11680142ef6cSHugh Dickins * complete without emptying the radix_tree, our page lock 11690142ef6cSHugh Dickins * on this swapcache page is not enough to prevent that - 11700142ef6cSHugh Dickins * free_swap_and_cache() of our swap entry will only 11710142ef6cSHugh Dickins * trylock_page(), removing swap from radix_tree whatever. 11720142ef6cSHugh Dickins * 11730142ef6cSHugh Dickins * We must not proceed to shmem_add_to_page_cache() if the 11740142ef6cSHugh Dickins * inode has been freed, but of course we cannot rely on 11750142ef6cSHugh Dickins * inode or mapping or info to check that. However, we can 11760142ef6cSHugh Dickins * safely check if our swap entry is still in use (and here 11770142ef6cSHugh Dickins * it can't have got reused for another page): if it's still 11780142ef6cSHugh Dickins * in use, then the inode cannot have been freed yet, and we 11790142ef6cSHugh Dickins * can safely proceed (if it's no longer in use, that tells 11800142ef6cSHugh Dickins * nothing about the inode, but we don't need to unuse swap). 
1181bde05d1cSHugh Dickins */ 1182bde05d1cSHugh Dickins if (!page_swapcount(*pagep)) 1183bde05d1cSHugh Dickins error = -ENOENT; 1184bde05d1cSHugh Dickins } 1185bde05d1cSHugh Dickins 1186d13d1443SKAMEZAWA Hiroyuki /* 1187778dd893SHugh Dickins * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 1188778dd893SHugh Dickins * but also to hold up shmem_evict_inode(): so inode cannot be freed 1189778dd893SHugh Dickins * beneath us (pagelock doesn't help until the page is in pagecache). 1190d13d1443SKAMEZAWA Hiroyuki */ 1191bde05d1cSHugh Dickins if (!error) 1192bde05d1cSHugh Dickins error = shmem_add_to_page_cache(*pagep, mapping, index, 1193fed400a1SWang Sheng-Hui radswap); 119448f170fbSHugh Dickins if (error != -ENOMEM) { 119546f65ec1SHugh Dickins /* 119646f65ec1SHugh Dickins * Truncation and eviction use free_swap_and_cache(), which 119746f65ec1SHugh Dickins * only does trylock page: if we raced, best clean up here. 119846f65ec1SHugh Dickins */ 1199bde05d1cSHugh Dickins delete_from_swap_cache(*pagep); 1200bde05d1cSHugh Dickins set_page_dirty(*pagep); 120146f65ec1SHugh Dickins if (!error) { 12024595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1203285b2c4fSHugh Dickins info->swapped--; 12044595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 120541ffe5d5SHugh Dickins swap_free(swap); 120646f65ec1SHugh Dickins } 12071da177e4SLinus Torvalds } 12082e0e26c7SHugh Dickins return error; 12091da177e4SLinus Torvalds } 12101da177e4SLinus Torvalds 12111da177e4SLinus Torvalds /* 121246f65ec1SHugh Dickins * Search through swapped inodes to find and replace swap by page. 12131da177e4SLinus Torvalds */ 121441ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 12151da177e4SLinus Torvalds { 121641ffe5d5SHugh Dickins struct list_head *this, *next; 12171da177e4SLinus Torvalds struct shmem_inode_info *info; 121800501b53SJohannes Weiner struct mem_cgroup *memcg; 1219bde05d1cSHugh Dickins int error = 0; 1220bde05d1cSHugh Dickins 1221bde05d1cSHugh Dickins /* 1222bde05d1cSHugh Dickins * There's a faint possibility that swap page was replaced before 12230142ef6cSHugh Dickins * caller locked it: caller will come back later with the right page. 1224bde05d1cSHugh Dickins */ 12250142ef6cSHugh Dickins if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 1226bde05d1cSHugh Dickins goto out; 1227778dd893SHugh Dickins 1228778dd893SHugh Dickins /* 1229778dd893SHugh Dickins * Charge page using GFP_KERNEL while we can wait, before taking 1230778dd893SHugh Dickins * the shmem_swaplist_mutex which might hold up shmem_writepage(). 1231778dd893SHugh Dickins * Charged back to the user (not to caller) when swap account is used. 
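/*
 * For orientation, a minimal sketch (not the actual mm/swapfile.c code) of
 * the caller's side of shmem_unuse() below, assuming only the calling
 * convention visible in this file: swapoff reads each swap slot into the
 * swapcache, locks the page, and passes it in; shmem_unuse() unlocks and
 * releases the page itself on all paths.
 */
static int swapoff_side_sketch(swp_entry_t entry, struct page *page)
{
	lock_page(page);			/* shmem_unuse() expects a locked swapcache page */
	wait_on_page_writeback(page);
	return shmem_unuse(entry, page);	/* drops the lock and our page reference */
}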
1232778dd893SHugh Dickins */ 12332cf85583STejun Heo error = mem_cgroup_try_charge_delay(page, current->mm, GFP_KERNEL, 12342cf85583STejun Heo &memcg, false); 1235778dd893SHugh Dickins if (error) 1236778dd893SHugh Dickins goto out; 123746f65ec1SHugh Dickins /* No radix_tree_preload: swap entry keeps a place for page in tree */ 123800501b53SJohannes Weiner error = -EAGAIN; 12391da177e4SLinus Torvalds 1240cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 124141ffe5d5SHugh Dickins list_for_each_safe(this, next, &shmem_swaplist) { 124241ffe5d5SHugh Dickins info = list_entry(this, struct shmem_inode_info, swaplist); 1243285b2c4fSHugh Dickins if (info->swapped) 124400501b53SJohannes Weiner error = shmem_unuse_inode(info, swap, &page); 12456922c0c7SHugh Dickins else 12466922c0c7SHugh Dickins list_del_init(&info->swaplist); 1247cb5f7b9aSHugh Dickins cond_resched(); 124800501b53SJohannes Weiner if (error != -EAGAIN) 1249778dd893SHugh Dickins break; 125000501b53SJohannes Weiner /* found nothing in this: move on to search the next */ 12511da177e4SLinus Torvalds } 1252cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1253778dd893SHugh Dickins 125400501b53SJohannes Weiner if (error) { 125500501b53SJohannes Weiner if (error != -ENOMEM) 125600501b53SJohannes Weiner error = 0; 1257f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 125800501b53SJohannes Weiner } else 1259f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 1260778dd893SHugh Dickins out: 1261aaa46865SHugh Dickins unlock_page(page); 126209cbfeafSKirill A. Shutemov put_page(page); 1263778dd893SHugh Dickins return error; 12641da177e4SLinus Torvalds } 12651da177e4SLinus Torvalds 12661da177e4SLinus Torvalds /* 12671da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 12681da177e4SLinus Torvalds */ 12691da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 12701da177e4SLinus Torvalds { 12711da177e4SLinus Torvalds struct shmem_inode_info *info; 12721da177e4SLinus Torvalds struct address_space *mapping; 12731da177e4SLinus Torvalds struct inode *inode; 12746922c0c7SHugh Dickins swp_entry_t swap; 12756922c0c7SHugh Dickins pgoff_t index; 12761da177e4SLinus Torvalds 1277800d8c63SKirill A. Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 12781da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 12791da177e4SLinus Torvalds mapping = page->mapping; 12801da177e4SLinus Torvalds index = page->index; 12811da177e4SLinus Torvalds inode = mapping->host; 12821da177e4SLinus Torvalds info = SHMEM_I(inode); 12831da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 12841da177e4SLinus Torvalds goto redirty; 1285d9fe526aSHugh Dickins if (!total_swap_pages) 12861da177e4SLinus Torvalds goto redirty; 12871da177e4SLinus Torvalds 1288d9fe526aSHugh Dickins /* 128997b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 129097b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 129197b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 129297b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 129397b713baSChristoph Hellwig * threads or sync. 1294d9fe526aSHugh Dickins */ 129548f170fbSHugh Dickins if (!wbc->for_reclaim) { 129648f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! 
*/ 129748f170fbSHugh Dickins goto redirty; 129848f170fbSHugh Dickins } 12991635f6a7SHugh Dickins 13001635f6a7SHugh Dickins /* 13011635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13021635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 13031635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 13041aac1400SHugh Dickins * 13051aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 13061aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 13071aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 13081aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 13091aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 13101635f6a7SHugh Dickins */ 13111635f6a7SHugh Dickins if (!PageUptodate(page)) { 13121aac1400SHugh Dickins if (inode->i_private) { 13131aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 13141aac1400SHugh Dickins spin_lock(&inode->i_lock); 13151aac1400SHugh Dickins shmem_falloc = inode->i_private; 13161aac1400SHugh Dickins if (shmem_falloc && 13178e205f77SHugh Dickins !shmem_falloc->waitq && 13181aac1400SHugh Dickins index >= shmem_falloc->start && 13191aac1400SHugh Dickins index < shmem_falloc->next) 13201aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 13211aac1400SHugh Dickins else 13221aac1400SHugh Dickins shmem_falloc = NULL; 13231aac1400SHugh Dickins spin_unlock(&inode->i_lock); 13241aac1400SHugh Dickins if (shmem_falloc) 13251aac1400SHugh Dickins goto redirty; 13261aac1400SHugh Dickins } 13271635f6a7SHugh Dickins clear_highpage(page); 13281635f6a7SHugh Dickins flush_dcache_page(page); 13291635f6a7SHugh Dickins SetPageUptodate(page); 13301635f6a7SHugh Dickins } 13311635f6a7SHugh Dickins 133238d8b4e6SHuang Ying swap = get_swap_page(page); 133348f170fbSHugh Dickins if (!swap.val) 133448f170fbSHugh Dickins goto redirty; 1335d9fe526aSHugh Dickins 1336b1dea800SHugh Dickins /* 1337b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 13386922c0c7SHugh Dickins * if it's not already there. Do it now before the page is 13396922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1340b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 13416922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 13426922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1343b1dea800SHugh Dickins */ 1344b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 134505bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 134605bf86b4SHugh Dickins list_add_tail(&info->swaplist, &shmem_swaplist); 1347b1dea800SHugh Dickins 134848f170fbSHugh Dickins if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 13494595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1350267a4c76SHugh Dickins shmem_recalc_inode(inode); 1351267a4c76SHugh Dickins info->swapped++; 13524595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1353267a4c76SHugh Dickins 1354aaa46865SHugh Dickins swap_shmem_alloc(swap); 13556922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 13566922c0c7SHugh Dickins 13576922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1358d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 13599fab5619SHugh Dickins swap_writepage(page, wbc); 13601da177e4SLinus Torvalds return 0; 13611da177e4SLinus Torvalds } 13621da177e4SLinus Torvalds 13636922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 136475f6d6d2SMinchan Kim put_swap_page(page, swap); 13651da177e4SLinus Torvalds redirty: 13661da177e4SLinus Torvalds set_page_dirty(page); 1367d9fe526aSHugh Dickins if (wbc->for_reclaim) 1368d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1369d9fe526aSHugh Dickins unlock_page(page); 1370d9fe526aSHugh Dickins return 0; 13711da177e4SLinus Torvalds } 13721da177e4SLinus Torvalds 137375edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 137471fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1375680d794bSakpm@linux-foundation.org { 1376680d794bSakpm@linux-foundation.org char buffer[64]; 1377680d794bSakpm@linux-foundation.org 137871fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1379095f1fc4SLee Schermerhorn return; /* show nothing */ 1380095f1fc4SLee Schermerhorn 1381a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1382095f1fc4SLee Schermerhorn 1383095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1384680d794bSakpm@linux-foundation.org } 138571fe804bSLee Schermerhorn 138671fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 138771fe804bSLee Schermerhorn { 138871fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 138971fe804bSLee Schermerhorn if (sbinfo->mpol) { 139071fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 139171fe804bSLee Schermerhorn mpol = sbinfo->mpol; 139271fe804bSLee Schermerhorn mpol_get(mpol); 139371fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 139471fe804bSLee Schermerhorn } 139571fe804bSLee Schermerhorn return mpol; 139671fe804bSLee Schermerhorn } 139775edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 139875edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 139975edd345SHugh Dickins { 140075edd345SHugh Dickins } 140175edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 140275edd345SHugh Dickins { 140375edd345SHugh Dickins return NULL; 140475edd345SHugh Dickins } 140575edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 140675edd345SHugh Dickins #ifndef CONFIG_NUMA 140775edd345SHugh Dickins #define vm_policy vm_private_data 140875edd345SHugh Dickins #endif 1409680d794bSakpm@linux-foundation.org 1410800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1411800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1412800d8c63SKirill A. Shutemov { 1413800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 14142c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1415800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1416800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1417800d8c63SKirill A. 
Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1418800d8c63SKirill A. Shutemov } 1419800d8c63SKirill A. Shutemov 1420800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1421800d8c63SKirill A. Shutemov { 1422800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1423800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1424800d8c63SKirill A. Shutemov } 1425800d8c63SKirill A. Shutemov 142641ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 142741ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 14281da177e4SLinus Torvalds { 14291da177e4SLinus Torvalds struct vm_area_struct pvma; 143018a2f371SMel Gorman struct page *page; 1431e9e9b7ecSMinchan Kim struct vm_fault vmf; 14321da177e4SLinus Torvalds 1433800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1434e9e9b7ecSMinchan Kim vmf.vma = &pvma; 1435e9e9b7ecSMinchan Kim vmf.address = 0; 1436e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1437800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 143818a2f371SMel Gorman 1439800d8c63SKirill A. Shutemov return page; 1440800d8c63SKirill A. Shutemov } 144118a2f371SMel Gorman 1442800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1443800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1444800d8c63SKirill A. Shutemov { 1445800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 1446800d8c63SKirill A. Shutemov struct inode *inode = &info->vfs_inode; 1447800d8c63SKirill A. Shutemov struct address_space *mapping = inode->i_mapping; 14484620a06eSGeert Uytterhoeven pgoff_t idx, hindex; 1449800d8c63SKirill A. Shutemov void __rcu **results; 1450800d8c63SKirill A. Shutemov struct page *page; 1451800d8c63SKirill A. Shutemov 1452e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1453800d8c63SKirill A. Shutemov return NULL; 1454800d8c63SKirill A. Shutemov 14554620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 1456800d8c63SKirill A. Shutemov rcu_read_lock(); 1457b93b0163SMatthew Wilcox if (radix_tree_gang_lookup_slot(&mapping->i_pages, &results, &idx, 1458800d8c63SKirill A. Shutemov hindex, 1) && idx < hindex + HPAGE_PMD_NR) { 1459800d8c63SKirill A. Shutemov rcu_read_unlock(); 1460800d8c63SKirill A. Shutemov return NULL; 1461800d8c63SKirill A. Shutemov } 1462800d8c63SKirill A. Shutemov rcu_read_unlock(); 1463800d8c63SKirill A. Shutemov 1464800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1465800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1466800d8c63SKirill A. Shutemov HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1467800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1468800d8c63SKirill A. Shutemov if (page) 1469800d8c63SKirill A. Shutemov prep_transhuge_page(page); 147018a2f371SMel Gorman return page; 147118a2f371SMel Gorman } 147218a2f371SMel Gorman 147318a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 147418a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 147518a2f371SMel Gorman { 147618a2f371SMel Gorman struct vm_area_struct pvma; 147718a2f371SMel Gorman struct page *page; 147818a2f371SMel Gorman 1479800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1480800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1481800d8c63SKirill A. 
Shutemov shmem_pseudo_vma_destroy(&pvma); 148218a2f371SMel Gorman 1483800d8c63SKirill A. Shutemov return page; 1484800d8c63SKirill A. Shutemov } 1485800d8c63SKirill A. Shutemov 1486800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 14870f079694SMike Rapoport struct inode *inode, 1488800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1489800d8c63SKirill A. Shutemov { 14900f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 1491800d8c63SKirill A. Shutemov struct page *page; 1492800d8c63SKirill A. Shutemov int nr; 1493800d8c63SKirill A. Shutemov int err = -ENOSPC; 1494800d8c63SKirill A. Shutemov 1495e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1496800d8c63SKirill A. Shutemov huge = false; 1497800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1498800d8c63SKirill A. Shutemov 14990f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1500800d8c63SKirill A. Shutemov goto failed; 1501800d8c63SKirill A. Shutemov 1502800d8c63SKirill A. Shutemov if (huge) 1503800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1504800d8c63SKirill A. Shutemov else 1505800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 150675edd345SHugh Dickins if (page) { 150775edd345SHugh Dickins __SetPageLocked(page); 150875edd345SHugh Dickins __SetPageSwapBacked(page); 1509800d8c63SKirill A. Shutemov return page; 151075edd345SHugh Dickins } 151118a2f371SMel Gorman 1512800d8c63SKirill A. Shutemov err = -ENOMEM; 15130f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1514800d8c63SKirill A. Shutemov failed: 1515800d8c63SKirill A. Shutemov return ERR_PTR(err); 15161da177e4SLinus Torvalds } 151771fe804bSLee Schermerhorn 15181da177e4SLinus Torvalds /* 1519bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1520bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1521bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1522bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1523bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1524bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1525bde05d1cSHugh Dickins * 1526bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1527bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1528bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
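/*
 * A worked illustration of the zone test in shmem_should_replace_page()
 * below. The gfp mask and zones here are assumed example values, not taken
 * from the gma500 driver: a mapping restricted to ZONE_DMA32 must not keep
 * a page that swapin happened to place in a higher zone.
 */
static bool example_page_breaks_constraint(struct page *page)
{
	gfp_t gfp = GFP_KERNEL | __GFP_DMA32;	/* hypothetical mapping_gfp_mask() */

	/* e.g. page_zonenum(page) == ZONE_NORMAL > gfp_zone(gfp) == ZONE_DMA32 */
	return page_zonenum(page) > gfp_zone(gfp);
}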
1529bde05d1cSHugh Dickins */ 1530bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1531bde05d1cSHugh Dickins { 1532bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1533bde05d1cSHugh Dickins } 1534bde05d1cSHugh Dickins 1535bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1536bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1537bde05d1cSHugh Dickins { 1538bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1539bde05d1cSHugh Dickins struct address_space *swap_mapping; 1540bde05d1cSHugh Dickins pgoff_t swap_index; 1541bde05d1cSHugh Dickins int error; 1542bde05d1cSHugh Dickins 1543bde05d1cSHugh Dickins oldpage = *pagep; 1544bde05d1cSHugh Dickins swap_index = page_private(oldpage); 1545bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1546bde05d1cSHugh Dickins 1547bde05d1cSHugh Dickins /* 1548bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1549bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1550bde05d1cSHugh Dickins */ 1551bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1552bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1553bde05d1cSHugh Dickins if (!newpage) 1554bde05d1cSHugh Dickins return -ENOMEM; 1555bde05d1cSHugh Dickins 155609cbfeafSKirill A. Shutemov get_page(newpage); 1557bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 15580142ef6cSHugh Dickins flush_dcache_page(newpage); 1559bde05d1cSHugh Dickins 15609956edf3SHugh Dickins __SetPageLocked(newpage); 15619956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1562bde05d1cSHugh Dickins SetPageUptodate(newpage); 1563bde05d1cSHugh Dickins set_page_private(newpage, swap_index); 1564bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1565bde05d1cSHugh Dickins 1566bde05d1cSHugh Dickins /* 1567bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1568bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1569bde05d1cSHugh Dickins */ 1570b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 157162f945b6SMatthew Wilcox error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 15720142ef6cSHugh Dickins if (!error) { 157311fb9989SMel Gorman __inc_node_page_state(newpage, NR_FILE_PAGES); 157411fb9989SMel Gorman __dec_node_page_state(oldpage, NR_FILE_PAGES); 15750142ef6cSHugh Dickins } 1576b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1577bde05d1cSHugh Dickins 15780142ef6cSHugh Dickins if (unlikely(error)) { 15790142ef6cSHugh Dickins /* 15800142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 15810142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 15820142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 15830142ef6cSHugh Dickins */ 15840142ef6cSHugh Dickins oldpage = newpage; 15850142ef6cSHugh Dickins } else { 15866a93ca8fSJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 1587bde05d1cSHugh Dickins lru_cache_add_anon(newpage); 15880142ef6cSHugh Dickins *pagep = newpage; 15890142ef6cSHugh Dickins } 1590bde05d1cSHugh Dickins 1591bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1592bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1593bde05d1cSHugh Dickins 1594bde05d1cSHugh Dickins unlock_page(oldpage); 159509cbfeafSKirill A. Shutemov put_page(oldpage); 159609cbfeafSKirill A. 
Shutemov put_page(oldpage); 15970142ef6cSHugh Dickins return error; 1598bde05d1cSHugh Dickins } 1599bde05d1cSHugh Dickins 1600bde05d1cSHugh Dickins /* 160168da9f05SHugh Dickins * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 16021da177e4SLinus Torvalds * 16031da177e4SLinus Torvalds * If we allocate a new one we do not mark it dirty. That's up to the 16041da177e4SLinus Torvalds * vm. If we swap it in we mark it dirty, since we also free the swap 16059e18eb29SAndres Lagar-Cavilla * entry: a page cannot live in both the swap and page cache. 16069e18eb29SAndres Lagar-Cavilla * 16079e18eb29SAndres Lagar-Cavilla * fault_mm and fault_type are only supplied by shmem_fault: 16089e18eb29SAndres Lagar-Cavilla * otherwise they are NULL. 16091da177e4SLinus Torvalds */ 161041ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 16119e18eb29SAndres Lagar-Cavilla struct page **pagep, enum sgp_type sgp, gfp_t gfp, 16122b740303SSouptick Joarder struct vm_area_struct *vma, struct vm_fault *vmf, 16132b740303SSouptick Joarder vm_fault_t *fault_type) 16141da177e4SLinus Torvalds { 16151da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 161623f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 16171da177e4SLinus Torvalds struct shmem_sb_info *sbinfo; 16189e18eb29SAndres Lagar-Cavilla struct mm_struct *charge_mm; 161900501b53SJohannes Weiner struct mem_cgroup *memcg; 162027ab7006SHugh Dickins struct page *page; 16211da177e4SLinus Torvalds swp_entry_t swap; 1622657e3038SKirill A. Shutemov enum sgp_type sgp_huge = sgp; 1623800d8c63SKirill A. Shutemov pgoff_t hindex = index; 16241da177e4SLinus Torvalds int error; 162554af6042SHugh Dickins int once = 0; 16261635f6a7SHugh Dickins int alloced = 0; 16271da177e4SLinus Torvalds 162809cbfeafSKirill A. Shutemov if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 16291da177e4SLinus Torvalds return -EFBIG; 1630657e3038SKirill A. Shutemov if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1631657e3038SKirill A. Shutemov sgp = SGP_CACHE; 16321da177e4SLinus Torvalds repeat: 163354af6042SHugh Dickins swap.val = 0; 16340cd6144aSJohannes Weiner page = find_lock_entry(mapping, index); 16353159f943SMatthew Wilcox if (xa_is_value(page)) { 163654af6042SHugh Dickins swap = radix_to_swp_entry(page); 163754af6042SHugh Dickins page = NULL; 163854af6042SHugh Dickins } 163954af6042SHugh Dickins 164075edd345SHugh Dickins if (sgp <= SGP_CACHE && 164109cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 164254af6042SHugh Dickins error = -EINVAL; 1643267a4c76SHugh Dickins goto unlock; 164454af6042SHugh Dickins } 164554af6042SHugh Dickins 164666d2f4d2SHugh Dickins if (page && sgp == SGP_WRITE) 164766d2f4d2SHugh Dickins mark_page_accessed(page); 164866d2f4d2SHugh Dickins 16491635f6a7SHugh Dickins /* fallocated page? */ 16501635f6a7SHugh Dickins if (page && !PageUptodate(page)) { 16511635f6a7SHugh Dickins if (sgp != SGP_READ) 16521635f6a7SHugh Dickins goto clear; 16531635f6a7SHugh Dickins unlock_page(page); 165409cbfeafSKirill A. Shutemov put_page(page); 16551635f6a7SHugh Dickins page = NULL; 16561635f6a7SHugh Dickins } 165754af6042SHugh Dickins if (page || (sgp == SGP_READ && !swap.val)) { 165854af6042SHugh Dickins *pagep = page; 165954af6042SHugh Dickins return 0; 166027ab7006SHugh Dickins } 166127ab7006SHugh Dickins 1662b409f9fcSHugh Dickins /* 166354af6042SHugh Dickins * Fast cache lookup did not find it: 166454af6042SHugh Dickins * bring it back from swap or allocate.
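/*
 * For orientation, a hedged sketch of a typical external user of this
 * function: drivers that keep objects in tmpfs (GEM and friends) pull
 * pages through the shmem_read_mapping_page_gfp() wrapper, which funnels
 * into shmem_getpage_gfp() with SGP_CACHE. The helper name
 * example_pin_object_page is made up for illustration.
 */
static struct page *example_pin_object_page(struct address_space *mapping,
					    pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping));
}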
1665b409f9fcSHugh Dickins */ 166654af6042SHugh Dickins sbinfo = SHMEM_SB(inode->i_sb); 1667cfda0526SMike Rapoport charge_mm = vma ? vma->vm_mm : current->mm; 166827ab7006SHugh Dickins 16691da177e4SLinus Torvalds if (swap.val) { 16701da177e4SLinus Torvalds /* Look it up and read it in.. */ 1671ec560175SHuang Ying page = lookup_swap_cache(swap, NULL, 0); 167227ab7006SHugh Dickins if (!page) { 16739e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 16749e18eb29SAndres Lagar-Cavilla if (fault_type) { 167568da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 16769e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 16772262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 16789e18eb29SAndres Lagar-Cavilla } 16799e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 168041ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 168127ab7006SHugh Dickins if (!page) { 16821da177e4SLinus Torvalds error = -ENOMEM; 168354af6042SHugh Dickins goto failed; 1684285b2c4fSHugh Dickins } 16851da177e4SLinus Torvalds } 16861da177e4SLinus Torvalds 16871da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 168854af6042SHugh Dickins lock_page(page); 16890142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1690d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1691bde05d1cSHugh Dickins error = -EEXIST; /* try again */ 1692d1899228SHugh Dickins goto unlock; 1693bde05d1cSHugh Dickins } 169427ab7006SHugh Dickins if (!PageUptodate(page)) { 16951da177e4SLinus Torvalds error = -EIO; 169654af6042SHugh Dickins goto failed; 169754af6042SHugh Dickins } 169854af6042SHugh Dickins wait_on_page_writeback(page); 169954af6042SHugh Dickins 1700bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1701bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1702bde05d1cSHugh Dickins if (error) 170354af6042SHugh Dickins goto failed; 17041da177e4SLinus Torvalds } 17051da177e4SLinus Torvalds 17062cf85583STejun Heo error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, 1707f627c2f5SKirill A. Shutemov false); 1708d1899228SHugh Dickins if (!error) { 170954af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 1710fed400a1SWang Sheng-Hui swp_to_radix_entry(swap)); 1711215c02bcSHugh Dickins /* 1712215c02bcSHugh Dickins * We already confirmed swap under page lock, and make 1713215c02bcSHugh Dickins * no memory allocation here, so usually no possibility 1714215c02bcSHugh Dickins * of error; but free_swap_and_cache() only trylocks a 1715215c02bcSHugh Dickins * page, so it is just possible that the entry has been 1716215c02bcSHugh Dickins * truncated or holepunched since swap was confirmed. 1717215c02bcSHugh Dickins * shmem_undo_range() will have done some of the 1718215c02bcSHugh Dickins * unaccounting, now delete_from_swap_cache() will do 171993aa7d95SVladimir Davydov * the rest. 1720215c02bcSHugh Dickins * Reset swap.val? No, leave it so "failed" goes back to 1721215c02bcSHugh Dickins * "repeat": reading a hole and writing should succeed. 1722215c02bcSHugh Dickins */ 172300501b53SJohannes Weiner if (error) { 1724f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1725215c02bcSHugh Dickins delete_from_swap_cache(page); 1726d1899228SHugh Dickins } 172700501b53SJohannes Weiner } 172854af6042SHugh Dickins if (error) 172954af6042SHugh Dickins goto failed; 173054af6042SHugh Dickins 1731f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 173200501b53SJohannes Weiner 17334595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 173454af6042SHugh Dickins info->swapped--; 173554af6042SHugh Dickins shmem_recalc_inode(inode); 17364595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 173727ab7006SHugh Dickins 173866d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 173966d2f4d2SHugh Dickins mark_page_accessed(page); 174066d2f4d2SHugh Dickins 174127ab7006SHugh Dickins delete_from_swap_cache(page); 174227ab7006SHugh Dickins set_page_dirty(page); 174327ab7006SHugh Dickins swap_free(swap); 174427ab7006SHugh Dickins 174554af6042SHugh Dickins } else { 1746cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1747cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1748cfda0526SMike Rapoport return 0; 1749cfda0526SMike Rapoport } 1750cfda0526SMike Rapoport 1751800d8c63SKirill A. Shutemov /* shmem_symlink() */ 1752800d8c63SKirill A. Shutemov if (mapping->a_ops != &shmem_aops) 1753800d8c63SKirill A. Shutemov goto alloc_nohuge; 1754657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1755800d8c63SKirill A. Shutemov goto alloc_nohuge; 1756800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1757800d8c63SKirill A. Shutemov goto alloc_huge; 1758800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1759800d8c63SKirill A. Shutemov loff_t i_size; 1760800d8c63SKirill A. Shutemov pgoff_t off; 1761800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1762800d8c63SKirill A. Shutemov goto alloc_nohuge; 1763800d8c63SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 1764800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1765800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1766800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1767800d8c63SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 1768800d8c63SKirill A. Shutemov goto alloc_huge; 1769800d8c63SKirill A. Shutemov /* fallthrough */ 1770800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1771657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1772657e3038SKirill A. Shutemov goto alloc_huge; 1773657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1774800d8c63SKirill A. Shutemov goto alloc_nohuge; 177559a16eadSHugh Dickins } 17761da177e4SLinus Torvalds 1777800d8c63SKirill A. Shutemov alloc_huge: 17780f079694SMike Rapoport page = shmem_alloc_and_acct_page(gfp, inode, index, true); 1779800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 17800f079694SMike Rapoport alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode, 1781800d8c63SKirill A. Shutemov index, false); 178254af6042SHugh Dickins } 1783800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1784779750d2SKirill A. Shutemov int retry = 5; 1785800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1786800d8c63SKirill A. Shutemov page = NULL; 1787779750d2SKirill A. Shutemov if (error != -ENOSPC) 1788779750d2SKirill A. Shutemov goto failed; 1789779750d2SKirill A. Shutemov /* 1790779750d2SKirill A. Shutemov * Try to reclaim some space by splitting a huge page 1791779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1792779750d2SKirill A. Shutemov */ 1793779750d2SKirill A. Shutemov while (retry--) { 1794779750d2SKirill A. Shutemov int ret; 1795779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1796779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1797779750d2SKirill A. Shutemov break; 1798779750d2SKirill A.
Shutemov if (ret) 1799779750d2SKirill A. Shutemov goto alloc_nohuge; 1800779750d2SKirill A. Shutemov } 1801800d8c63SKirill A. Shutemov goto failed; 1802800d8c63SKirill A. Shutemov } 1803800d8c63SKirill A. Shutemov 1804800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1805800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1806800d8c63SKirill A. Shutemov else 1807800d8c63SKirill A. Shutemov hindex = index; 1808800d8c63SKirill A. Shutemov 180966d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1810eb39d618SHugh Dickins __SetPageReferenced(page); 181166d2f4d2SHugh Dickins 18122cf85583STejun Heo error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, 1813800d8c63SKirill A. Shutemov PageTransHuge(page)); 181454af6042SHugh Dickins if (error) 1815800d8c63SKirill A. Shutemov goto unacct; 1816800d8c63SKirill A. Shutemov error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, 1817800d8c63SKirill A. Shutemov compound_order(page)); 1818b065b432SHugh Dickins if (!error) { 1819800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 1820fed400a1SWang Sheng-Hui NULL); 1821b065b432SHugh Dickins radix_tree_preload_end(); 1822b065b432SHugh Dickins } 1823b065b432SHugh Dickins if (error) { 1824800d8c63SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, 1825800d8c63SKirill A. Shutemov PageTransHuge(page)); 1826800d8c63SKirill A. Shutemov goto unacct; 1827b065b432SHugh Dickins } 1828800d8c63SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, 1829800d8c63SKirill A. Shutemov PageTransHuge(page)); 183054af6042SHugh Dickins lru_cache_add_anon(page); 183154af6042SHugh Dickins 18324595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1833800d8c63SKirill A. Shutemov info->alloced += 1 << compound_order(page); 1834800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 183554af6042SHugh Dickins shmem_recalc_inode(inode); 18364595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 18371635f6a7SHugh Dickins alloced = true; 183854af6042SHugh Dickins 1839779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1840779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1841779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1842779750d2SKirill A. Shutemov /* 1843779750d2SKirill A. Shutemov * Part of the huge page is beyond i_size: subject 1844779750d2SKirill A. Shutemov * to shrink under memory pressure. 1845779750d2SKirill A. Shutemov */ 1846779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1847d041353dSCong Wang /* 1848d041353dSCong Wang * _careful to defend against unlocked access to 1849d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1850d041353dSCong Wang */ 1851d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1852779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1853779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1854779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1855779750d2SKirill A. Shutemov } 1856779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1857779750d2SKirill A. Shutemov } 1858779750d2SKirill A. Shutemov 1859ec9516fbSHugh Dickins /* 18601635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 
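/*
 * A worked example of the SHMEM_HUGE_WITHIN_SIZE test earlier in this
 * function, assuming 4K pages and 2M huge pages (so HPAGE_PMD_NR == 512);
 * the helper below simply mirrors that logic for illustration.
 */
static bool example_within_size(pgoff_t index, loff_t i_size)
{
	pgoff_t off = round_up(index, (pgoff_t)HPAGE_PMD_NR);

	i_size = round_up(i_size, PAGE_SIZE);
	/* index 300 in a 4M file: off = 512, i_size >> PAGE_SHIFT = 1024, so huge */
	return i_size >= HPAGE_PMD_SIZE && i_size >> PAGE_SHIFT >= off;
}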
18611635f6a7SHugh Dickins */ 18621635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 18631635f6a7SHugh Dickins sgp = SGP_WRITE; 18641635f6a7SHugh Dickins clear: 18651635f6a7SHugh Dickins /* 18661635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 18671635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 18681635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1869ec9516fbSHugh Dickins */ 1870800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1871800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 1872800d8c63SKirill A. Shutemov int i; 1873800d8c63SKirill A. Shutemov 1874800d8c63SKirill A. Shutemov for (i = 0; i < (1 << compound_order(head)); i++) { 1875800d8c63SKirill A. Shutemov clear_highpage(head + i); 1876800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 1877800d8c63SKirill A. Shutemov } 1878800d8c63SKirill A. Shutemov SetPageUptodate(head); 1879ec9516fbSHugh Dickins } 18801da177e4SLinus Torvalds } 1881bde05d1cSHugh Dickins 188254af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 188375edd345SHugh Dickins if (sgp <= SGP_CACHE && 188409cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1885267a4c76SHugh Dickins if (alloced) { 1886267a4c76SHugh Dickins ClearPageDirty(page); 1887267a4c76SHugh Dickins delete_from_page_cache(page); 18884595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1889267a4c76SHugh Dickins shmem_recalc_inode(inode); 18904595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1891267a4c76SHugh Dickins } 189254af6042SHugh Dickins error = -EINVAL; 1893267a4c76SHugh Dickins goto unlock; 1894ff36b801SShaohua Li } 1895800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 189654af6042SHugh Dickins return 0; 1897d00806b1SNick Piggin 1898d0217ac0SNick Piggin /* 189954af6042SHugh Dickins * Error recovery. 19001da177e4SLinus Torvalds */ 190154af6042SHugh Dickins unacct: 19020f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1 << compound_order(page)); 1903800d8c63SKirill A. Shutemov 1904800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 1905800d8c63SKirill A. Shutemov unlock_page(page); 1906800d8c63SKirill A. Shutemov put_page(page); 1907800d8c63SKirill A. Shutemov goto alloc_nohuge; 1908800d8c63SKirill A. Shutemov } 190954af6042SHugh Dickins failed: 1910267a4c76SHugh Dickins if (swap.val && !shmem_confirm_swap(mapping, index, swap)) 191154af6042SHugh Dickins error = -EEXIST; 1912d1899228SHugh Dickins unlock: 191327ab7006SHugh Dickins if (page) { 191454af6042SHugh Dickins unlock_page(page); 191509cbfeafSKirill A. Shutemov put_page(page); 191654af6042SHugh Dickins } 191754af6042SHugh Dickins if (error == -ENOSPC && !once++) { 19184595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 191954af6042SHugh Dickins shmem_recalc_inode(inode); 19204595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 19211da177e4SLinus Torvalds goto repeat; 1922d8dc74f2SAdrian Bunk } 1923d1899228SHugh Dickins if (error == -EEXIST) /* from above or from radix_tree_insert */ 192454af6042SHugh Dickins goto repeat; 192554af6042SHugh Dickins return error; 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds 192810d20bd2SLinus Torvalds /* 192910d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 193010d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 193110d20bd2SLinus Torvalds * target. 
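/*
 * Hedged illustration of the contrast with autoremove_wake_function():
 * that helper only dequeues the waiter when default_wake_function()
 * reports success, roughly as sketched here, whereas the variant below
 * always dequeues, so wake_up_all() on the hole-punch waitq in
 * shmem_fallocate() cannot leave stale entries behind.
 */
static int autoremove_style_sketch(wait_queue_entry_t *wait, unsigned mode,
				   int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)			/* only dequeue on a successful wakeup */
		list_del_init(&wait->entry);
	return ret;
}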
193210d20bd2SLinus Torvalds */ 1933ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 193410d20bd2SLinus Torvalds { 193510d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 19362055da97SIngo Molnar list_del_init(&wait->entry); 193710d20bd2SLinus Torvalds return ret; 193810d20bd2SLinus Torvalds } 193910d20bd2SLinus Torvalds 194020acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 19411da177e4SLinus Torvalds { 194211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 1943496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 19449e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 1945657e3038SKirill A. Shutemov enum sgp_type sgp; 194620acce67SSouptick Joarder int err; 194720acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 19481da177e4SLinus Torvalds 1949f00cdc6dSHugh Dickins /* 1950f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 1951f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 1952f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 19538e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 19548e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 19558e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 19568e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 19578e205f77SHugh Dickins * 19588e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 19598e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 19608e205f77SHugh Dickins * we just need to make racing faults a rare case. 19618e205f77SHugh Dickins * 19628e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 19638e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 19648e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 
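/*
 * The userspace shape of the race discussed above, as a hedged sketch
 * (path and sizes are made up): one thread keeps faulting pages of a
 * tmpfs mapping while another punches the same range.
 */
#if 0	/* userspace illustration only, not kernel code */
int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
char *map;

ftruncate(fd, 1 << 20);
map = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
/* thread A: */ for (;;) { volatile char c = map[4096]; (void)c; }
/* thread B: */ fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			  0, 1 << 20);
#endif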
1965f00cdc6dSHugh Dickins */ 1966f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 1967f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 1968f00cdc6dSHugh Dickins 1969f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 1970f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 19718e205f77SHugh Dickins if (shmem_falloc && 19728e205f77SHugh Dickins shmem_falloc->waitq && 19738e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 19748e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 19758e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 197610d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 19778e205f77SHugh Dickins 19788e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 1979f00cdc6dSHugh Dickins if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1980f00cdc6dSHugh Dickins !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 19818e205f77SHugh Dickins /* It's polite to up mmap_sem if we can */ 1982f00cdc6dSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 19838e205f77SHugh Dickins ret = VM_FAULT_RETRY; 1984f00cdc6dSHugh Dickins } 19858e205f77SHugh Dickins 19868e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 19878e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 19888e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 19898e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19908e205f77SHugh Dickins schedule(); 19918e205f77SHugh Dickins 19928e205f77SHugh Dickins /* 19938e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 19948e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 19958e205f77SHugh Dickins * is usually invalid by the time we reach here, but 19968e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 19978e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 19988e205f77SHugh Dickins */ 19998e205f77SHugh Dickins spin_lock(&inode->i_lock); 20008e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 20018e205f77SHugh Dickins spin_unlock(&inode->i_lock); 20028e205f77SHugh Dickins return ret; 2003f00cdc6dSHugh Dickins } 20048e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2005f00cdc6dSHugh Dickins } 2006f00cdc6dSHugh Dickins 2007657e3038SKirill A. Shutemov sgp = SGP_CACHE; 200818600332SMichal Hocko 200918600332SMichal Hocko if ((vma->vm_flags & VM_NOHUGEPAGE) || 201018600332SMichal Hocko test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 2011657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 201218600332SMichal Hocko else if (vma->vm_flags & VM_HUGEPAGE) 201318600332SMichal Hocko sgp = SGP_HUGE; 2014657e3038SKirill A. 
Shutemov 201520acce67SSouptick Joarder err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 2016cfda0526SMike Rapoport gfp, vma, vmf, &ret); 201720acce67SSouptick Joarder if (err) 201820acce67SSouptick Joarder return vmf_error(err); 201968da9f05SHugh Dickins return ret; 20201da177e4SLinus Torvalds } 20211da177e4SLinus Torvalds 2022c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2023c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2024c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2025c01d5b30SHugh Dickins { 2026c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2027c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2028c01d5b30SHugh Dickins unsigned long addr; 2029c01d5b30SHugh Dickins unsigned long offset; 2030c01d5b30SHugh Dickins unsigned long inflated_len; 2031c01d5b30SHugh Dickins unsigned long inflated_addr; 2032c01d5b30SHugh Dickins unsigned long inflated_offset; 2033c01d5b30SHugh Dickins 2034c01d5b30SHugh Dickins if (len > TASK_SIZE) 2035c01d5b30SHugh Dickins return -ENOMEM; 2036c01d5b30SHugh Dickins 2037c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2038c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2039c01d5b30SHugh Dickins 2040e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2041c01d5b30SHugh Dickins return addr; 2042c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2043c01d5b30SHugh Dickins return addr; 2044c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2045c01d5b30SHugh Dickins return addr; 2046c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2047c01d5b30SHugh Dickins return addr; 2048c01d5b30SHugh Dickins 2049c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2050c01d5b30SHugh Dickins return addr; 2051c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2052c01d5b30SHugh Dickins return addr; 2053c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2054c01d5b30SHugh Dickins return addr; 2055c01d5b30SHugh Dickins /* 2056c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2057c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2058c01d5b30SHugh Dickins * But if caller specified an address hint, respect that as before. 2059c01d5b30SHugh Dickins */ 2060c01d5b30SHugh Dickins if (uaddr) 2061c01d5b30SHugh Dickins return addr; 2062c01d5b30SHugh Dickins 2063c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2064c01d5b30SHugh Dickins struct super_block *sb; 2065c01d5b30SHugh Dickins 2066c01d5b30SHugh Dickins if (file) { 2067c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2068c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2069c01d5b30SHugh Dickins } else { 2070c01d5b30SHugh Dickins /* 2071c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2072c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
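/*
 * A worked example (assumed 2M HPAGE_PMD_SIZE; the address values in the
 * comment are invented) of the inflation arithmetic at the end of this
 * function: reserve len + 2M - 4K, then slide the start so it is congruent
 * to the file offset modulo 2M, yielding the PMD-aligned interior THP
 * wants. The helper mirrors the code below for illustration.
 */
static unsigned long example_inflate_align(unsigned long inflated_addr,
					   unsigned long offset)
{
	unsigned long inflated_offset = inflated_addr & (HPAGE_PMD_SIZE - 1);

	/* e.g. inflated_addr % 2M == 0x1000, offset == 0: back up 4K... */
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)	/* ...then bump forward a full 2M */
		inflated_addr += HPAGE_PMD_SIZE;
	return inflated_addr;
}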
2073c01d5b30SHugh Dickins */ 2074c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2075c01d5b30SHugh Dickins return addr; 2076c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2077c01d5b30SHugh Dickins } 20783089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2079c01d5b30SHugh Dickins return addr; 2080c01d5b30SHugh Dickins } 2081c01d5b30SHugh Dickins 2082c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2083c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2084c01d5b30SHugh Dickins return addr; 2085c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2086c01d5b30SHugh Dickins return addr; 2087c01d5b30SHugh Dickins 2088c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2089c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2090c01d5b30SHugh Dickins return addr; 2091c01d5b30SHugh Dickins if (inflated_len < len) 2092c01d5b30SHugh Dickins return addr; 2093c01d5b30SHugh Dickins 2094c01d5b30SHugh Dickins inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2095c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2096c01d5b30SHugh Dickins return addr; 2097c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2098c01d5b30SHugh Dickins return addr; 2099c01d5b30SHugh Dickins 2100c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2101c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2102c01d5b30SHugh Dickins if (inflated_offset > offset) 2103c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2104c01d5b30SHugh Dickins 2105c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2106c01d5b30SHugh Dickins return addr; 2107c01d5b30SHugh Dickins return inflated_addr; 2108c01d5b30SHugh Dickins } 2109c01d5b30SHugh Dickins 21101da177e4SLinus Torvalds #ifdef CONFIG_NUMA 211141ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 21121da177e4SLinus Torvalds { 2113496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 211441ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds 2117d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2118d8dc74f2SAdrian Bunk unsigned long addr) 21191da177e4SLinus Torvalds { 2120496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 212141ffe5d5SHugh Dickins pgoff_t index; 21221da177e4SLinus Torvalds 212341ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 212441ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds #endif 21271da177e4SLinus Torvalds 21281da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 21291da177e4SLinus Torvalds { 2130496ad9aaSAl Viro struct inode *inode = file_inode(file); 21311da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 21321da177e4SLinus Torvalds int retval = -ENOMEM; 21331da177e4SLinus Torvalds 21344595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 21351da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 21361da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 21371da177e4SLinus Torvalds goto out_nomem; 21381da177e4SLinus Torvalds info->flags |= VM_LOCKED; 213989e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 21401da177e4SLinus Torvalds } 21411da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 21421da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 21431da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 214489e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 21451da177e4SLinus Torvalds } 21461da177e4SLinus Torvalds retval = 0; 214789e004eaSLee Schermerhorn 21481da177e4SLinus Torvalds out_nomem: 21494595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 21501da177e4SLinus Torvalds return retval; 21511da177e4SLinus Torvalds } 21521da177e4SLinus Torvalds 21539b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 21541da177e4SLinus Torvalds { 21551da177e4SLinus Torvalds file_accessed(file); 21561da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2157e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2158f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2159f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2160f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2161f3f0e1d2SKirill A. Shutemov } 21621da177e4SLinus Torvalds return 0; 21631da177e4SLinus Torvalds } 21641da177e4SLinus Torvalds 2165454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 216609208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 21671da177e4SLinus Torvalds { 21681da177e4SLinus Torvalds struct inode *inode; 21691da177e4SLinus Torvalds struct shmem_inode_info *info; 21701da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 21711da177e4SLinus Torvalds 21725b04c689SPavel Emelyanov if (shmem_reserve_inode(sb)) 21731da177e4SLinus Torvalds return NULL; 21741da177e4SLinus Torvalds 21751da177e4SLinus Torvalds inode = new_inode(sb); 21761da177e4SLinus Torvalds if (inode) { 217785fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 2178454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 21791da177e4SLinus Torvalds inode->i_blocks = 0; 2180078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 218146c9a946SArnd Bergmann inode->i_generation = prandom_u32(); 21821da177e4SLinus Torvalds info = SHMEM_I(inode); 21831da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 21841da177e4SLinus Torvalds spin_lock_init(&info->lock); 218540e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 21860b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2187779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 21881da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 218938f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 219072c04902SAl Viro cache_no_acl(inode); 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds switch (mode & S_IFMT) { 21931da177e4SLinus Torvalds default: 219439f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 21951da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 21961da177e4SLinus Torvalds break; 21971da177e4SLinus Torvalds case S_IFREG: 219814fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 21991da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 22001da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 220171fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 220271fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 22031da177e4SLinus Torvalds break; 22041da177e4SLinus Torvalds case S_IFDIR: 2205d8c76e6fSDave Hansen inc_nlink(inode); 22061da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 22071da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 22081da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 22091da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 22101da177e4SLinus Torvalds break; 22111da177e4SLinus Torvalds case S_IFLNK: 22121da177e4SLinus Torvalds /* 22131da177e4SLinus Torvalds * Must not load anything in the rbtree, 22141da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 22151da177e4SLinus Torvalds */ 221671fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 22171da177e4SLinus Torvalds break; 22181da177e4SLinus Torvalds } 2219b45d71fbSJoel Fernandes (Google) 2220b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode); 22215b04c689SPavel Emelyanov } else 22225b04c689SPavel Emelyanov shmem_free_inode(sb); 22231da177e4SLinus Torvalds return inode; 22241da177e4SLinus Torvalds } 22251da177e4SLinus Torvalds 22260cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping) 22270cd6144aSJohannes Weiner { 2228f8005451SHugh Dickins return mapping->a_ops == &shmem_aops; 22290cd6144aSJohannes Weiner } 22300cd6144aSJohannes Weiner 22318d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 22324c27fe4cSMike Rapoport pmd_t *dst_pmd, 22334c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 22344c27fe4cSMike Rapoport unsigned long dst_addr, 22354c27fe4cSMike Rapoport unsigned long src_addr, 22368d103963SMike Rapoport bool zeropage, 22374c27fe4cSMike Rapoport struct page **pagep) 22384c27fe4cSMike Rapoport { 22394c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 22404c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 22414c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 22424c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 22434c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 22444c27fe4cSMike Rapoport struct mem_cgroup *memcg; 22454c27fe4cSMike Rapoport spinlock_t *ptl; 22464c27fe4cSMike Rapoport void *page_kaddr; 22474c27fe4cSMike Rapoport struct page *page; 22484c27fe4cSMike Rapoport pte_t _dst_pte, *dst_pte; 22494c27fe4cSMike Rapoport int ret; 22504c27fe4cSMike Rapoport 22514c27fe4cSMike Rapoport ret = -ENOMEM; 22520f079694SMike Rapoport if (!shmem_inode_acct_block(inode, 1)) 22534c27fe4cSMike Rapoport goto out; 22544c27fe4cSMike Rapoport 2255cb658a45SAndrea Arcangeli if (!*pagep) { 
22564c27fe4cSMike Rapoport page = shmem_alloc_page(gfp, info, pgoff); 22574c27fe4cSMike Rapoport if (!page) 22580f079694SMike Rapoport goto out_unacct_blocks; 22594c27fe4cSMike Rapoport 22608d103963SMike Rapoport if (!zeropage) { /* mcopy_atomic */ 22614c27fe4cSMike Rapoport page_kaddr = kmap_atomic(page); 22628d103963SMike Rapoport ret = copy_from_user(page_kaddr, 22638d103963SMike Rapoport (const void __user *)src_addr, 22644c27fe4cSMike Rapoport PAGE_SIZE); 22654c27fe4cSMike Rapoport kunmap_atomic(page_kaddr); 22664c27fe4cSMike Rapoport 22674c27fe4cSMike Rapoport /* fallback to copy_from_user outside mmap_sem */ 22684c27fe4cSMike Rapoport if (unlikely(ret)) { 22694c27fe4cSMike Rapoport *pagep = page; 22700f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 22714c27fe4cSMike Rapoport /* don't free the page */ 22724c27fe4cSMike Rapoport return -EFAULT; 22734c27fe4cSMike Rapoport } 22748d103963SMike Rapoport } else { /* mfill_zeropage_atomic */ 22758d103963SMike Rapoport clear_highpage(page); 22768d103963SMike Rapoport } 22774c27fe4cSMike Rapoport } else { 22784c27fe4cSMike Rapoport page = *pagep; 22794c27fe4cSMike Rapoport *pagep = NULL; 22804c27fe4cSMike Rapoport } 22814c27fe4cSMike Rapoport 22829cc90c66SAndrea Arcangeli VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 22839cc90c66SAndrea Arcangeli __SetPageLocked(page); 22849cc90c66SAndrea Arcangeli __SetPageSwapBacked(page); 2285a425d358SAndrea Arcangeli __SetPageUptodate(page); 22869cc90c66SAndrea Arcangeli 22872cf85583STejun Heo ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false); 22884c27fe4cSMike Rapoport if (ret) 22894c27fe4cSMike Rapoport goto out_release; 22904c27fe4cSMike Rapoport 22914c27fe4cSMike Rapoport ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); 22924c27fe4cSMike Rapoport if (!ret) { 22934c27fe4cSMike Rapoport ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL); 22944c27fe4cSMike Rapoport radix_tree_preload_end(); 22954c27fe4cSMike Rapoport } 22964c27fe4cSMike Rapoport if (ret) 22974c27fe4cSMike Rapoport goto out_release_uncharge; 22984c27fe4cSMike Rapoport 22994c27fe4cSMike Rapoport mem_cgroup_commit_charge(page, memcg, false, false); 23004c27fe4cSMike Rapoport 23014c27fe4cSMike Rapoport _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 23024c27fe4cSMike Rapoport if (dst_vma->vm_flags & VM_WRITE) 23034c27fe4cSMike Rapoport _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 23044c27fe4cSMike Rapoport 23054c27fe4cSMike Rapoport ret = -EEXIST; 23064c27fe4cSMike Rapoport dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 23074c27fe4cSMike Rapoport if (!pte_none(*dst_pte)) 23084c27fe4cSMike Rapoport goto out_release_uncharge_unlock; 23094c27fe4cSMike Rapoport 23104c27fe4cSMike Rapoport lru_cache_add_anon(page); 23114c27fe4cSMike Rapoport 23124c27fe4cSMike Rapoport spin_lock(&info->lock); 23134c27fe4cSMike Rapoport info->alloced++; 23144c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 23154c27fe4cSMike Rapoport shmem_recalc_inode(inode); 23164c27fe4cSMike Rapoport spin_unlock(&info->lock); 23174c27fe4cSMike Rapoport 23184c27fe4cSMike Rapoport inc_mm_counter(dst_mm, mm_counter_file(page)); 23194c27fe4cSMike Rapoport page_add_file_rmap(page, false); 23204c27fe4cSMike Rapoport set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 23214c27fe4cSMike Rapoport 23224c27fe4cSMike Rapoport /* No need to invalidate - it was non-present before */ 23234c27fe4cSMike Rapoport update_mmu_cache(dst_vma, dst_addr, dst_pte); 23244c27fe4cSMike Rapoport unlock_page(page); 23254c27fe4cSMike 
Rapoport pte_unmap_unlock(dst_pte, ptl); 23264c27fe4cSMike Rapoport ret = 0; 23274c27fe4cSMike Rapoport out: 23284c27fe4cSMike Rapoport return ret; 23294c27fe4cSMike Rapoport out_release_uncharge_unlock: 23304c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 23314c27fe4cSMike Rapoport out_release_uncharge: 23324c27fe4cSMike Rapoport mem_cgroup_cancel_charge(page, memcg, false); 23334c27fe4cSMike Rapoport out_release: 23349cc90c66SAndrea Arcangeli unlock_page(page); 23354c27fe4cSMike Rapoport put_page(page); 23364c27fe4cSMike Rapoport out_unacct_blocks: 23370f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 23384c27fe4cSMike Rapoport goto out; 23394c27fe4cSMike Rapoport } 23404c27fe4cSMike Rapoport 23418d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 23428d103963SMike Rapoport pmd_t *dst_pmd, 23438d103963SMike Rapoport struct vm_area_struct *dst_vma, 23448d103963SMike Rapoport unsigned long dst_addr, 23458d103963SMike Rapoport unsigned long src_addr, 23468d103963SMike Rapoport struct page **pagep) 23478d103963SMike Rapoport { 23488d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 23498d103963SMike Rapoport dst_addr, src_addr, false, pagep); 23508d103963SMike Rapoport } 23518d103963SMike Rapoport 23528d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, 23538d103963SMike Rapoport pmd_t *dst_pmd, 23548d103963SMike Rapoport struct vm_area_struct *dst_vma, 23558d103963SMike Rapoport unsigned long dst_addr) 23568d103963SMike Rapoport { 23578d103963SMike Rapoport struct page *page = NULL; 23588d103963SMike Rapoport 23598d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 23608d103963SMike Rapoport dst_addr, 0, true, &page); 23618d103963SMike Rapoport } 23628d103963SMike Rapoport 23631da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 236492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 236569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 23661da177e4SLinus Torvalds 23676d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 23686d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 23696d9d88d0SJarkko Sakkinen #else 23706d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 23716d9d88d0SJarkko Sakkinen #endif 23726d9d88d0SJarkko Sakkinen 23731da177e4SLinus Torvalds static int 2374800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2375800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2376800d15a5SNick Piggin struct page **pagep, void **fsdata) 23771da177e4SLinus Torvalds { 2378800d15a5SNick Piggin struct inode *inode = mapping->host; 237940e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 238009cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 238140e041a2SDavid Herrmann 238240e041a2SDavid Herrmann /* i_mutex is held by caller */ 23833f472cc9SSteven Rostedt (VMware) if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) { 238440e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) 238540e041a2SDavid Herrmann return -EPERM; 238640e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 238740e041a2SDavid Herrmann return -EPERM; 238840e041a2SDavid Herrmann } 238940e041a2SDavid Herrmann 23909e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2391800d15a5SNick Piggin } 2392800d15a5SNick Piggin 2393800d15a5SNick Piggin static int 2394800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2395800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2396800d15a5SNick Piggin struct page *page, void *fsdata) 2397800d15a5SNick Piggin { 2398800d15a5SNick Piggin struct inode *inode = mapping->host; 2399800d15a5SNick Piggin 2400800d15a5SNick Piggin if (pos + copied > inode->i_size) 2401800d15a5SNick Piggin i_size_write(inode, pos + copied); 2402800d15a5SNick Piggin 2403ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2404800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2405800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2406800d8c63SKirill A. Shutemov int i; 2407800d8c63SKirill A. Shutemov 2408800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2409800d8c63SKirill A. Shutemov if (head + i == page) 2410800d8c63SKirill A. Shutemov continue; 2411800d8c63SKirill A. Shutemov clear_highpage(head + i); 2412800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2413800d8c63SKirill A. Shutemov } 2414800d8c63SKirill A. Shutemov } 241509cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 241609cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2417ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 241809cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2419ec9516fbSHugh Dickins } 2420800d8c63SKirill A. Shutemov SetPageUptodate(head); 2421ec9516fbSHugh Dickins } 2422d3602444SHugh Dickins set_page_dirty(page); 24236746aff7SWu Fengguang unlock_page(page); 242409cbfeafSKirill A. Shutemov put_page(page); 2425d3602444SHugh Dickins 2426800d15a5SNick Piggin return copied; 24271da177e4SLinus Torvalds } 24281da177e4SLinus Torvalds 24292ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 24301da177e4SLinus Torvalds { 24316e58e79dSAl Viro struct file *file = iocb->ki_filp; 24326e58e79dSAl Viro struct inode *inode = file_inode(file); 24331da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 243441ffe5d5SHugh Dickins pgoff_t index; 243541ffe5d5SHugh Dickins unsigned long offset; 2436a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2437f7c1d074SGeert Uytterhoeven int error = 0; 2438cb66a7a1SAl Viro ssize_t retval = 0; 24396e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2440a0ee5ec5SHugh Dickins 2441a0ee5ec5SHugh Dickins /* 2442a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2443a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2444a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 2445a0ee5ec5SHugh Dickins */ 2446777eda2cSAl Viro if (!iter_is_iovec(to)) 244775edd345SHugh Dickins sgp = SGP_CACHE; 24481da177e4SLinus Torvalds 244909cbfeafSKirill A. 
Shutemov index = *ppos >> PAGE_SHIFT; 245009cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 24511da177e4SLinus Torvalds 24521da177e4SLinus Torvalds for (;;) { 24531da177e4SLinus Torvalds struct page *page = NULL; 245441ffe5d5SHugh Dickins pgoff_t end_index; 245541ffe5d5SHugh Dickins unsigned long nr, ret; 24561da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 24571da177e4SLinus Torvalds 245809cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 24591da177e4SLinus Torvalds if (index > end_index) 24601da177e4SLinus Torvalds break; 24611da177e4SLinus Torvalds if (index == end_index) { 246209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 24631da177e4SLinus Torvalds if (nr <= offset) 24641da177e4SLinus Torvalds break; 24651da177e4SLinus Torvalds } 24661da177e4SLinus Torvalds 24679e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 24686e58e79dSAl Viro if (error) { 24696e58e79dSAl Viro if (error == -EINVAL) 24706e58e79dSAl Viro error = 0; 24711da177e4SLinus Torvalds break; 24721da177e4SLinus Torvalds } 247375edd345SHugh Dickins if (page) { 247475edd345SHugh Dickins if (sgp == SGP_CACHE) 247575edd345SHugh Dickins set_page_dirty(page); 2476d3602444SHugh Dickins unlock_page(page); 247775edd345SHugh Dickins } 24781da177e4SLinus Torvalds 24791da177e4SLinus Torvalds /* 24801da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 24811b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 24821da177e4SLinus Torvalds */ 248309cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 24841da177e4SLinus Torvalds i_size = i_size_read(inode); 248509cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 24861da177e4SLinus Torvalds if (index == end_index) { 248709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 24881da177e4SLinus Torvalds if (nr <= offset) { 24891da177e4SLinus Torvalds if (page) 249009cbfeafSKirill A. Shutemov put_page(page); 24911da177e4SLinus Torvalds break; 24921da177e4SLinus Torvalds } 24931da177e4SLinus Torvalds } 24941da177e4SLinus Torvalds nr -= offset; 24951da177e4SLinus Torvalds 24961da177e4SLinus Torvalds if (page) { 24971da177e4SLinus Torvalds /* 24981da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 24991da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 25001da177e4SLinus Torvalds * before reading the page on the kernel side. 25011da177e4SLinus Torvalds */ 25021da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 25031da177e4SLinus Torvalds flush_dcache_page(page); 25041da177e4SLinus Torvalds /* 25051da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 25061da177e4SLinus Torvalds */ 25071da177e4SLinus Torvalds if (!offset) 25081da177e4SLinus Torvalds mark_page_accessed(page); 2509b5810039SNick Piggin } else { 25101da177e4SLinus Torvalds page = ZERO_PAGE(0); 251109cbfeafSKirill A. Shutemov get_page(page); 2512b5810039SNick Piggin } 25131da177e4SLinus Torvalds 25141da177e4SLinus Torvalds /* 25151da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 25161da177e4SLinus Torvalds * now we can copy it to user space... 25171da177e4SLinus Torvalds */ 25182ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 25196e58e79dSAl Viro retval += ret; 25201da177e4SLinus Torvalds offset += ret; 252109cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 252209cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 25231da177e4SLinus Torvalds 252409cbfeafSKirill A. 
Shutemov put_page(page); 25252ba5bbedSAl Viro if (!iov_iter_count(to)) 25261da177e4SLinus Torvalds break; 25276e58e79dSAl Viro if (ret < nr) { 25286e58e79dSAl Viro error = -EFAULT; 25296e58e79dSAl Viro break; 25306e58e79dSAl Viro } 25311da177e4SLinus Torvalds cond_resched(); 25321da177e4SLinus Torvalds } 25331da177e4SLinus Torvalds 253409cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 25356e58e79dSAl Viro file_accessed(file); 25366e58e79dSAl Viro return retval ? retval : error; 25371da177e4SLinus Torvalds } 25381da177e4SLinus Torvalds 2539220f2ac9SHugh Dickins /* 2540220f2ac9SHugh Dickins * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. 2541220f2ac9SHugh Dickins */ 2542220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2543965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2544220f2ac9SHugh Dickins { 2545220f2ac9SHugh Dickins struct page *page; 2546220f2ac9SHugh Dickins struct pagevec pvec; 2547220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2548220f2ac9SHugh Dickins bool done = false; 2549220f2ac9SHugh Dickins int i; 2550220f2ac9SHugh Dickins 255186679820SMel Gorman pagevec_init(&pvec); 2552220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2553220f2ac9SHugh Dickins while (!done) { 25540cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2555220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2556220f2ac9SHugh Dickins if (!pvec.nr) { 2557965c8e59SAndrew Morton if (whence == SEEK_DATA) 2558220f2ac9SHugh Dickins index = end; 2559220f2ac9SHugh Dickins break; 2560220f2ac9SHugh Dickins } 2561220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2562220f2ac9SHugh Dickins if (index < indices[i]) { 2563965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2564220f2ac9SHugh Dickins done = true; 2565220f2ac9SHugh Dickins break; 2566220f2ac9SHugh Dickins } 2567220f2ac9SHugh Dickins index = indices[i]; 2568220f2ac9SHugh Dickins } 2569220f2ac9SHugh Dickins page = pvec.pages[i]; 25703159f943SMatthew Wilcox if (page && !xa_is_value(page)) { 2571220f2ac9SHugh Dickins if (!PageUptodate(page)) 2572220f2ac9SHugh Dickins page = NULL; 2573220f2ac9SHugh Dickins } 2574220f2ac9SHugh Dickins if (index >= end || 2575965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2576965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2577220f2ac9SHugh Dickins done = true; 2578220f2ac9SHugh Dickins break; 2579220f2ac9SHugh Dickins } 2580220f2ac9SHugh Dickins } 25810cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2582220f2ac9SHugh Dickins pagevec_release(&pvec); 2583220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2584220f2ac9SHugh Dickins cond_resched(); 2585220f2ac9SHugh Dickins } 2586220f2ac9SHugh Dickins return index; 2587220f2ac9SHugh Dickins } 2588220f2ac9SHugh Dickins 2589965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2590220f2ac9SHugh Dickins { 2591220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2592220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2593220f2ac9SHugh Dickins pgoff_t start, end; 2594220f2ac9SHugh Dickins loff_t new_offset; 2595220f2ac9SHugh Dickins 2596965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2597965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2598220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 25995955102cSAl Viro inode_lock(inode); 2600220f2ac9SHugh Dickins /* We're holding i_mutex so we can 
access i_size directly */ 2601220f2ac9SHugh Dickins 2602220f2ac9SHugh Dickins if (offset < 0) 2603220f2ac9SHugh Dickins offset = -EINVAL; 2604220f2ac9SHugh Dickins else if (offset >= inode->i_size) 2605220f2ac9SHugh Dickins offset = -ENXIO; 2606220f2ac9SHugh Dickins else { 260709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 260809cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2609965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 261009cbfeafSKirill A. Shutemov new_offset <<= PAGE_SHIFT; 2611220f2ac9SHugh Dickins if (new_offset > offset) { 2612220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2613220f2ac9SHugh Dickins offset = new_offset; 2614965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2615220f2ac9SHugh Dickins offset = -ENXIO; 2616220f2ac9SHugh Dickins else 2617220f2ac9SHugh Dickins offset = inode->i_size; 2618220f2ac9SHugh Dickins } 2619220f2ac9SHugh Dickins } 2620220f2ac9SHugh Dickins 2621387aae6fSHugh Dickins if (offset >= 0) 262246a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 26235955102cSAl Viro inode_unlock(inode); 2624220f2ac9SHugh Dickins return offset; 2625220f2ac9SHugh Dickins } 2626220f2ac9SHugh Dickins 262783e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 262883e4fa9cSHugh Dickins loff_t len) 262983e4fa9cSHugh Dickins { 2630496ad9aaSAl Viro struct inode *inode = file_inode(file); 2631e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 263240e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 26331aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2634e2d12e22SHugh Dickins pgoff_t start, index, end; 2635e2d12e22SHugh Dickins int error; 263683e4fa9cSHugh Dickins 263713ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 263813ace4d0SHugh Dickins return -EOPNOTSUPP; 263913ace4d0SHugh Dickins 26405955102cSAl Viro inode_lock(inode); 264183e4fa9cSHugh Dickins 264283e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 264383e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 264483e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 264583e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 26468e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 264783e4fa9cSHugh Dickins 264840e041a2SDavid Herrmann /* protected by i_mutex */ 264940e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) { 265040e041a2SDavid Herrmann error = -EPERM; 265140e041a2SDavid Herrmann goto out; 265240e041a2SDavid Herrmann } 265340e041a2SDavid Herrmann 26548e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2655f00cdc6dSHugh Dickins shmem_falloc.start = unmap_start >> PAGE_SHIFT; 2656f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2657f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2658f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2659f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2660f00cdc6dSHugh Dickins 266183e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 266283e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 266383e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 266483e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 266583e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 26668e205f77SHugh Dickins 26678e205f77SHugh Dickins spin_lock(&inode->i_lock); 
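		/*
		 * Faults racing with this hole-punch found inode->i_private
		 * set and went to sleep on shmem_falloc_waitq; the range is
		 * now unmapped and truncated, so clear i_private under
		 * i_lock and wake them all to let them retry.
		 */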
26688e205f77SHugh Dickins inode->i_private = NULL; 26698e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 26702055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 26718e205f77SHugh Dickins spin_unlock(&inode->i_lock); 267283e4fa9cSHugh Dickins error = 0; 26738e205f77SHugh Dickins goto out; 267483e4fa9cSHugh Dickins } 267583e4fa9cSHugh Dickins 2676e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2677e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2678e2d12e22SHugh Dickins if (error) 2679e2d12e22SHugh Dickins goto out; 2680e2d12e22SHugh Dickins 268140e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 268240e041a2SDavid Herrmann error = -EPERM; 268340e041a2SDavid Herrmann goto out; 268440e041a2SDavid Herrmann } 268540e041a2SDavid Herrmann 268609cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 268709cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2688e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2689e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2690e2d12e22SHugh Dickins error = -ENOSPC; 2691e2d12e22SHugh Dickins goto out; 2692e2d12e22SHugh Dickins } 2693e2d12e22SHugh Dickins 26948e205f77SHugh Dickins shmem_falloc.waitq = NULL; 26951aac1400SHugh Dickins shmem_falloc.start = start; 26961aac1400SHugh Dickins shmem_falloc.next = start; 26971aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 26981aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 26991aac1400SHugh Dickins spin_lock(&inode->i_lock); 27001aac1400SHugh Dickins inode->i_private = &shmem_falloc; 27011aac1400SHugh Dickins spin_unlock(&inode->i_lock); 27021aac1400SHugh Dickins 2703e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2704e2d12e22SHugh Dickins struct page *page; 2705e2d12e22SHugh Dickins 2706e2d12e22SHugh Dickins /* 2707e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2708e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2709e2d12e22SHugh Dickins */ 2710e2d12e22SHugh Dickins if (signal_pending(current)) 2711e2d12e22SHugh Dickins error = -EINTR; 27121aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 27131aac1400SHugh Dickins error = -ENOMEM; 2714e2d12e22SHugh Dickins else 27159e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2716e2d12e22SHugh Dickins if (error) { 27171635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 27187f556567SHugh Dickins if (index > start) { 27191635f6a7SHugh Dickins shmem_undo_range(inode, 272009cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2721b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 27227f556567SHugh Dickins } 27231aac1400SHugh Dickins goto undone; 2724e2d12e22SHugh Dickins } 2725e2d12e22SHugh Dickins 2726e2d12e22SHugh Dickins /* 27271aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 27281aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 
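 *
 * shmem_writepage() compares the page's index against shmem_falloc.start
 * and .next: if memory pressure tries to swap out a page which this very
 * fallocate has just allocated, it bumps nr_unswapped and redirties the
 * page instead, and once nr_unswapped exceeds nr_falloced the check at
 * the top of this loop bails out with -ENOMEM rather than provoke a
 * swapstorm.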
27291aac1400SHugh Dickins */ 27301aac1400SHugh Dickins shmem_falloc.next++; 27311aac1400SHugh Dickins if (!PageUptodate(page)) 27321aac1400SHugh Dickins shmem_falloc.nr_falloced++; 27331aac1400SHugh Dickins 27341aac1400SHugh Dickins /* 27351635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 27361635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 27371635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2738e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2739e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2740e2d12e22SHugh Dickins */ 2741e2d12e22SHugh Dickins set_page_dirty(page); 2742e2d12e22SHugh Dickins unlock_page(page); 274309cbfeafSKirill A. Shutemov put_page(page); 2744e2d12e22SHugh Dickins cond_resched(); 2745e2d12e22SHugh Dickins } 2746e2d12e22SHugh Dickins 2747e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2748e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2749078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 27501aac1400SHugh Dickins undone: 27511aac1400SHugh Dickins spin_lock(&inode->i_lock); 27521aac1400SHugh Dickins inode->i_private = NULL; 27531aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2754e2d12e22SHugh Dickins out: 27555955102cSAl Viro inode_unlock(inode); 275683e4fa9cSHugh Dickins return error; 275783e4fa9cSHugh Dickins } 275883e4fa9cSHugh Dickins 2759726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 27601da177e4SLinus Torvalds { 2761726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 27621da177e4SLinus Torvalds 27631da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 276409cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 27651da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 27660edd73b3SHugh Dickins if (sbinfo->max_blocks) { 27671da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 276841ffe5d5SHugh Dickins buf->f_bavail = 276941ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 277041ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 27710edd73b3SHugh Dickins } 27720edd73b3SHugh Dickins if (sbinfo->max_inodes) { 27731da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 27741da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 27751da177e4SLinus Torvalds } 27761da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 27771da177e4SLinus Torvalds return 0; 27781da177e4SLinus Torvalds } 27791da177e4SLinus Torvalds 27801da177e4SLinus Torvalds /* 27811da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
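 *
 * shmem_mknod() below reserves the inode, runs ACL and LSM security
 * initialisation (tolerating -EOPNOTSUPP from LSMs that don't care),
 * grows the directory by the fixed BOGO_DIRENT_SIZE, and takes an
 * extra dget() to pin the new dentry in core.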
27821da177e4SLinus Torvalds */ 27831da177e4SLinus Torvalds static int 27841a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 27851da177e4SLinus Torvalds { 27860b0a0806SHugh Dickins struct inode *inode; 27871da177e4SLinus Torvalds int error = -ENOSPC; 27881da177e4SLinus Torvalds 2789454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 27901da177e4SLinus Torvalds if (inode) { 2791feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2792feda821eSChristoph Hellwig if (error) 2793feda821eSChristoph Hellwig goto out_iput; 27942a7dba39SEric Paris error = security_inode_init_security(inode, dir, 27959d8f13baSMimi Zohar &dentry->d_name, 27966d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2797feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2798feda821eSChristoph Hellwig goto out_iput; 279937ec43cdSMimi Zohar 2800718deb6bSAl Viro error = 0; 28011da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2802078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 28031da177e4SLinus Torvalds d_instantiate(dentry, inode); 28041da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 28051da177e4SLinus Torvalds } 28061da177e4SLinus Torvalds return error; 2807feda821eSChristoph Hellwig out_iput: 2808feda821eSChristoph Hellwig iput(inode); 2809feda821eSChristoph Hellwig return error; 28101da177e4SLinus Torvalds } 28111da177e4SLinus Torvalds 281260545d0dSAl Viro static int 281360545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 281460545d0dSAl Viro { 281560545d0dSAl Viro struct inode *inode; 281660545d0dSAl Viro int error = -ENOSPC; 281760545d0dSAl Viro 281860545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 281960545d0dSAl Viro if (inode) { 282060545d0dSAl Viro error = security_inode_init_security(inode, dir, 282160545d0dSAl Viro NULL, 282260545d0dSAl Viro shmem_initxattrs, NULL); 2823feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2824feda821eSChristoph Hellwig goto out_iput; 2825feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2826feda821eSChristoph Hellwig if (error) 2827feda821eSChristoph Hellwig goto out_iput; 282860545d0dSAl Viro d_tmpfile(dentry, inode); 282960545d0dSAl Viro } 283060545d0dSAl Viro return error; 2831feda821eSChristoph Hellwig out_iput: 2832feda821eSChristoph Hellwig iput(inode); 2833feda821eSChristoph Hellwig return error; 283460545d0dSAl Viro } 283560545d0dSAl Viro 283618bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 28371da177e4SLinus Torvalds { 28381da177e4SLinus Torvalds int error; 28391da177e4SLinus Torvalds 28401da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 28411da177e4SLinus Torvalds return error; 2842d8c76e6fSDave Hansen inc_nlink(dir); 28431da177e4SLinus Torvalds return 0; 28441da177e4SLinus Torvalds } 28451da177e4SLinus Torvalds 28464acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2847ebfc3b49SAl Viro bool excl) 28481da177e4SLinus Torvalds { 28491da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 28501da177e4SLinus Torvalds } 28511da177e4SLinus Torvalds 28521da177e4SLinus Torvalds /* 28531da177e4SLinus Torvalds * Link a file.. 
28541da177e4SLinus Torvalds */ 28551da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 28561da177e4SLinus Torvalds { 285775c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 28585b04c689SPavel Emelyanov int ret; 28591da177e4SLinus Torvalds 28601da177e4SLinus Torvalds /* 28611da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 28621da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 28631da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 28641da177e4SLinus Torvalds */ 28655b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 28665b04c689SPavel Emelyanov if (ret) 28675b04c689SPavel Emelyanov goto out; 28681da177e4SLinus Torvalds 28691da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2870078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2871d8c76e6fSDave Hansen inc_nlink(inode); 28727de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 28731da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 28741da177e4SLinus Torvalds d_instantiate(dentry, inode); 28755b04c689SPavel Emelyanov out: 28765b04c689SPavel Emelyanov return ret; 28771da177e4SLinus Torvalds } 28781da177e4SLinus Torvalds 28791da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 28801da177e4SLinus Torvalds { 288175c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 28821da177e4SLinus Torvalds 28835b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 28845b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 28851da177e4SLinus Torvalds 28861da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 2887078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 28889a53c3a7SDave Hansen drop_nlink(inode); 28891da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 28901da177e4SLinus Torvalds return 0; 28911da177e4SLinus Torvalds } 28921da177e4SLinus Torvalds 28931da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 28941da177e4SLinus Torvalds { 28951da177e4SLinus Torvalds if (!simple_empty(dentry)) 28961da177e4SLinus Torvalds return -ENOTEMPTY; 28971da177e4SLinus Torvalds 289875c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 28999a53c3a7SDave Hansen drop_nlink(dir); 29001da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 29011da177e4SLinus Torvalds } 29021da177e4SLinus Torvalds 290337456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 290437456771SMiklos Szeredi { 2905e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 2906e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 290737456771SMiklos Szeredi 290837456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 290937456771SMiklos Szeredi if (old_is_dir) { 291037456771SMiklos Szeredi drop_nlink(old_dir); 291137456771SMiklos Szeredi inc_nlink(new_dir); 291237456771SMiklos Szeredi } else { 291337456771SMiklos Szeredi drop_nlink(new_dir); 291437456771SMiklos Szeredi inc_nlink(old_dir); 291537456771SMiklos Szeredi } 291637456771SMiklos Szeredi } 291737456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 291837456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 291975c3cfa8SDavid Howells 
d_inode(old_dentry)->i_ctime = 2920078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 292137456771SMiklos Szeredi 292237456771SMiklos Szeredi return 0; 292337456771SMiklos Szeredi } 292437456771SMiklos Szeredi 292546fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 292646fdb794SMiklos Szeredi { 292746fdb794SMiklos Szeredi struct dentry *whiteout; 292846fdb794SMiklos Szeredi int error; 292946fdb794SMiklos Szeredi 293046fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 293146fdb794SMiklos Szeredi if (!whiteout) 293246fdb794SMiklos Szeredi return -ENOMEM; 293346fdb794SMiklos Szeredi 293446fdb794SMiklos Szeredi error = shmem_mknod(old_dir, whiteout, 293546fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 293646fdb794SMiklos Szeredi dput(whiteout); 293746fdb794SMiklos Szeredi if (error) 293846fdb794SMiklos Szeredi return error; 293946fdb794SMiklos Szeredi 294046fdb794SMiklos Szeredi /* 294146fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 294246fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 294346fdb794SMiklos Szeredi * 294446fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 294546fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 294646fdb794SMiklos Szeredi */ 294746fdb794SMiklos Szeredi d_rehash(whiteout); 294846fdb794SMiklos Szeredi return 0; 294946fdb794SMiklos Szeredi } 295046fdb794SMiklos Szeredi 29511da177e4SLinus Torvalds /* 29521da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename, 29531da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 29541da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it 29551da177e4SLinus Torvalds * gets overwritten.
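 *
 * shmem_rename2() accepts RENAME_NOREPLACE, RENAME_EXCHANGE and
 * RENAME_WHITEOUT.  EXCHANGE is handed off to shmem_exchange() above,
 * which only needs to fix the parents' link counts when a directory
 * and a non-directory swap places; WHITEOUT materialises the whiteout
 * via shmem_whiteout() before the ordinary rename goes ahead.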
29561da177e4SLinus Torvalds */ 29573b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 29581da177e4SLinus Torvalds { 295975c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 29601da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 29611da177e4SLinus Torvalds 296246fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 29633b69ff51SMiklos Szeredi return -EINVAL; 29643b69ff51SMiklos Szeredi 296537456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 296637456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 296737456771SMiklos Szeredi 29681da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 29691da177e4SLinus Torvalds return -ENOTEMPTY; 29701da177e4SLinus Torvalds 297146fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 297246fdb794SMiklos Szeredi int error; 297346fdb794SMiklos Szeredi 297446fdb794SMiklos Szeredi error = shmem_whiteout(old_dir, old_dentry); 297546fdb794SMiklos Szeredi if (error) 297646fdb794SMiklos Szeredi return error; 297746fdb794SMiklos Szeredi } 297846fdb794SMiklos Szeredi 297975c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 29801da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 2981b928095bSMiklos Szeredi if (they_are_dirs) { 298275c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 29839a53c3a7SDave Hansen drop_nlink(old_dir); 2984b928095bSMiklos Szeredi } 29851da177e4SLinus Torvalds } else if (they_are_dirs) { 29869a53c3a7SDave Hansen drop_nlink(old_dir); 2987d8c76e6fSDave Hansen inc_nlink(new_dir); 29881da177e4SLinus Torvalds } 29891da177e4SLinus Torvalds 29901da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 29911da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 29921da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 29931da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 2994078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 29951da177e4SLinus Torvalds return 0; 29961da177e4SLinus Torvalds } 29971da177e4SLinus Torvalds 29981da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 29991da177e4SLinus Torvalds { 30001da177e4SLinus Torvalds int error; 30011da177e4SLinus Torvalds int len; 30021da177e4SLinus Torvalds struct inode *inode; 30039276aad6SHugh Dickins struct page *page; 30041da177e4SLinus Torvalds 30051da177e4SLinus Torvalds len = strlen(symname) + 1; 300609cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 30071da177e4SLinus Torvalds return -ENAMETOOLONG; 30081da177e4SLinus Torvalds 30090825a6f9SJoe Perches inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 30100825a6f9SJoe Perches VM_NORESERVE); 30111da177e4SLinus Torvalds if (!inode) 30121da177e4SLinus Torvalds return -ENOSPC; 30131da177e4SLinus Torvalds 30149d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 30156d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3016570bc1c2SStephen Smalley if (error) { 3017570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 3018570bc1c2SStephen Smalley iput(inode); 3019570bc1c2SStephen Smalley return error; 3020570bc1c2SStephen Smalley } 3021570bc1c2SStephen Smalley error = 0; 3022570bc1c2SStephen Smalley } 3023570bc1c2SStephen Smalley 30241da177e4SLinus Torvalds inode->i_size = len-1; 302569f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 30263ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 30273ed47db3SAl Viro if (!inode->i_link) { 302869f07ec9SHugh Dickins iput(inode); 302969f07ec9SHugh Dickins return -ENOMEM; 303069f07ec9SHugh Dickins } 303169f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 30321da177e4SLinus Torvalds } else { 3033e8ecde25SAl Viro inode_nohighmem(inode); 30349e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 30351da177e4SLinus Torvalds if (error) { 30361da177e4SLinus Torvalds iput(inode); 30371da177e4SLinus Torvalds return error; 30381da177e4SLinus Torvalds } 303914fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 30401da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 304121fc61c7SAl Viro memcpy(page_address(page), symname, len); 3042ec9516fbSHugh Dickins SetPageUptodate(page); 30431da177e4SLinus Torvalds set_page_dirty(page); 30446746aff7SWu Fengguang unlock_page(page); 304509cbfeafSKirill A. 
Shutemov put_page(page); 30461da177e4SLinus Torvalds } 30471da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3048078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 30491da177e4SLinus Torvalds d_instantiate(dentry, inode); 30501da177e4SLinus Torvalds dget(dentry); 30511da177e4SLinus Torvalds return 0; 30521da177e4SLinus Torvalds } 30531da177e4SLinus Torvalds 3054fceef393SAl Viro static void shmem_put_link(void *arg) 3055fceef393SAl Viro { 3056fceef393SAl Viro mark_page_accessed(arg); 3057fceef393SAl Viro put_page(arg); 3058fceef393SAl Viro } 3059fceef393SAl Viro 30606b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3061fceef393SAl Viro struct inode *inode, 3062fceef393SAl Viro struct delayed_call *done) 30631da177e4SLinus Torvalds { 30641da177e4SLinus Torvalds struct page *page = NULL; 30656b255391SAl Viro int error; 30666a6c9904SAl Viro if (!dentry) { 30676a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 30686a6c9904SAl Viro if (!page) 30696b255391SAl Viro return ERR_PTR(-ECHILD); 30706a6c9904SAl Viro if (!PageUptodate(page)) { 30716a6c9904SAl Viro put_page(page); 30726a6c9904SAl Viro return ERR_PTR(-ECHILD); 30736a6c9904SAl Viro } 30746a6c9904SAl Viro } else { 30759e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3076680baacbSAl Viro if (error) 3077680baacbSAl Viro return ERR_PTR(error); 3078d3602444SHugh Dickins unlock_page(page); 30791da177e4SLinus Torvalds } 3080fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 308121fc61c7SAl Viro return page_address(page); 30821da177e4SLinus Torvalds } 30831da177e4SLinus Torvalds 3084b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3085b09e0fa4SEric Paris /* 3086b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3087b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3088b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3089b09e0fa4SEric Paris * filesystem level, though. 3090b09e0fa4SEric Paris */ 3091b09e0fa4SEric Paris 30926d9d88d0SJarkko Sakkinen /* 30936d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
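 *
 * security_inode_init_security() hands over the array of xattrs the
 * LSM wants on the new inode; the names arrive without the
 * "security." prefix, so each one is re-prefixed here before being
 * queued on the per-inode simple_xattrs list.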
30946d9d88d0SJarkko Sakkinen */ 30956d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 30966d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 30976d9d88d0SJarkko Sakkinen void *fs_info) 30986d9d88d0SJarkko Sakkinen { 30996d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 31006d9d88d0SJarkko Sakkinen const struct xattr *xattr; 310138f38657SAristeu Rozanski struct simple_xattr *new_xattr; 31026d9d88d0SJarkko Sakkinen size_t len; 31036d9d88d0SJarkko Sakkinen 31046d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 310538f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 31066d9d88d0SJarkko Sakkinen if (!new_xattr) 31076d9d88d0SJarkko Sakkinen return -ENOMEM; 31086d9d88d0SJarkko Sakkinen 31096d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 31106d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 31116d9d88d0SJarkko Sakkinen GFP_KERNEL); 31126d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 31136d9d88d0SJarkko Sakkinen kfree(new_xattr); 31146d9d88d0SJarkko Sakkinen return -ENOMEM; 31156d9d88d0SJarkko Sakkinen } 31166d9d88d0SJarkko Sakkinen 31176d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 31186d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 31196d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 31206d9d88d0SJarkko Sakkinen xattr->name, len); 31216d9d88d0SJarkko Sakkinen 312238f38657SAristeu Rozanski simple_xattr_list_add(&info->xattrs, new_xattr); 31236d9d88d0SJarkko Sakkinen } 31246d9d88d0SJarkko Sakkinen 31256d9d88d0SJarkko Sakkinen return 0; 31266d9d88d0SJarkko Sakkinen } 31276d9d88d0SJarkko Sakkinen 3128aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3129b296821aSAl Viro struct dentry *unused, struct inode *inode, 3130b296821aSAl Viro const char *name, void *buffer, size_t size) 3131aa7c5241SAndreas Gruenbacher { 3132b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3133aa7c5241SAndreas Gruenbacher 3134aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3135aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3136aa7c5241SAndreas Gruenbacher } 3137aa7c5241SAndreas Gruenbacher 3138aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 313959301226SAl Viro struct dentry *unused, struct inode *inode, 314059301226SAl Viro const char *name, const void *value, 314159301226SAl Viro size_t size, int flags) 3142aa7c5241SAndreas Gruenbacher { 314359301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3144aa7c5241SAndreas Gruenbacher 3145aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3146aa7c5241SAndreas Gruenbacher return simple_xattr_set(&info->xattrs, name, value, size, flags); 3147aa7c5241SAndreas Gruenbacher } 3148aa7c5241SAndreas Gruenbacher 3149aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3150aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3151aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3152aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3153aa7c5241SAndreas Gruenbacher }; 3154aa7c5241SAndreas Gruenbacher 3155aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3156aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3157aa7c5241SAndreas Gruenbacher 
.get = shmem_xattr_handler_get, 3158aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3159aa7c5241SAndreas Gruenbacher }; 3160aa7c5241SAndreas Gruenbacher 3161b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3162b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3163feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3164feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3165b09e0fa4SEric Paris #endif 3166aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3167aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3168b09e0fa4SEric Paris NULL 3169b09e0fa4SEric Paris }; 3170b09e0fa4SEric Paris 3171b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3172b09e0fa4SEric Paris { 317375c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3174786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3175b09e0fa4SEric Paris } 3176b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3177b09e0fa4SEric Paris 317869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 31796b255391SAl Viro .get_link = simple_get_link, 3180b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3181b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3182b09e0fa4SEric Paris #endif 31831da177e4SLinus Torvalds }; 31841da177e4SLinus Torvalds 318592e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 31866b255391SAl Viro .get_link = shmem_get_link, 3187b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3188b09e0fa4SEric Paris .listxattr = shmem_listxattr, 318939f0247dSAndreas Gruenbacher #endif 3190b09e0fa4SEric Paris }; 319139f0247dSAndreas Gruenbacher 319291828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 319391828a40SDavid M. Grimes { 319491828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 319591828a40SDavid M. Grimes } 319691828a40SDavid M. Grimes 319791828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 319891828a40SDavid M. Grimes { 319991828a40SDavid M. Grimes __u32 *fh = vfh; 320091828a40SDavid M. Grimes __u64 inum = fh[2]; 320191828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 320291828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 320391828a40SDavid M. Grimes } 320491828a40SDavid M. Grimes 320512ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */ 320612ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode) 320712ba780dSAmir Goldstein { 320812ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode); 320912ba780dSAmir Goldstein 321012ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode); 321112ba780dSAmir Goldstein } 321212ba780dSAmir Goldstein 321312ba780dSAmir Goldstein 3214480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3215480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 321691828a40SDavid M. Grimes { 321791828a40SDavid M. Grimes struct inode *inode; 3218480b116cSChristoph Hellwig struct dentry *dentry = NULL; 321935c2a7f4SHugh Dickins u64 inum; 322091828a40SDavid M. 
Grimes 3221480b116cSChristoph Hellwig if (fh_len < 3) 3222480b116cSChristoph Hellwig return NULL; 3223480b116cSChristoph Hellwig 322435c2a7f4SHugh Dickins inum = fid->raw[2]; 322535c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 322635c2a7f4SHugh Dickins 3227480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3228480b116cSChristoph Hellwig shmem_match, fid->raw); 322991828a40SDavid M. Grimes if (inode) { 323012ba780dSAmir Goldstein dentry = shmem_find_alias(inode); 323191828a40SDavid M. Grimes iput(inode); 323291828a40SDavid M. Grimes } 323391828a40SDavid M. Grimes 3234480b116cSChristoph Hellwig return dentry; 323591828a40SDavid M. Grimes } 323691828a40SDavid M. Grimes 3237b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3238b0b0382bSAl Viro struct inode *parent) 323991828a40SDavid M. Grimes { 32405fe0c237SAneesh Kumar K.V if (*len < 3) { 32415fe0c237SAneesh Kumar K.V *len = 3; 324294e07a75SNamjae Jeon return FILEID_INVALID; 32435fe0c237SAneesh Kumar K.V } 324491828a40SDavid M. Grimes 32451d3382cbSAl Viro if (inode_unhashed(inode)) { 324691828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 324791828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 324891828a40SDavid M. Grimes * time, we need a lock to ensure we only try 324991828a40SDavid M. Grimes * to do it once 325091828a40SDavid M. Grimes */ 325191828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 325291828a40SDavid M. Grimes spin_lock(&lock); 32531d3382cbSAl Viro if (inode_unhashed(inode)) 325491828a40SDavid M. Grimes __insert_inode_hash(inode, 325591828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 325691828a40SDavid M. Grimes spin_unlock(&lock); 325791828a40SDavid M. Grimes } 325891828a40SDavid M. Grimes 325991828a40SDavid M. Grimes fh[0] = inode->i_generation; 326091828a40SDavid M. Grimes fh[1] = inode->i_ino; 326191828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 326291828a40SDavid M. Grimes 326391828a40SDavid M. Grimes *len = 3; 326491828a40SDavid M. Grimes return 1; 326591828a40SDavid M. Grimes } 326691828a40SDavid M. Grimes 326739655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 326891828a40SDavid M. Grimes .get_parent = shmem_get_parent, 326991828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3270480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 327191828a40SDavid M. Grimes }; 327291828a40SDavid M. Grimes 3273680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 3274680d794bSakpm@linux-foundation.org bool remount) 32751da177e4SLinus Torvalds { 32761da177e4SLinus Torvalds char *this_char, *value, *rest; 327749cd0a5cSGreg Thelen struct mempolicy *mpol = NULL; 32788751e039SEric W. Biederman uid_t uid; 32798751e039SEric W. Biederman gid_t gid; 32801da177e4SLinus Torvalds 3281b00dc3adSHugh Dickins while (options != NULL) { 3282b00dc3adSHugh Dickins this_char = options; 3283b00dc3adSHugh Dickins for (;;) { 3284b00dc3adSHugh Dickins /* 3285b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3286b00dc3adSHugh Dickins * mount options form a comma-separated list, 3287b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
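 *
 * e.g. in "size=1g,mpol=bind:0-1,3,uid=100" the commas inside
 * "bind:0-1,3" must not terminate the mpol option, which is why the
 * scan below only treats a comma as a separator when the character
 * following it is not a digit.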
3288b00dc3adSHugh Dickins */ 3289b00dc3adSHugh Dickins options = strchr(options, ','); 3290b00dc3adSHugh Dickins if (options == NULL) 3291b00dc3adSHugh Dickins break; 3292b00dc3adSHugh Dickins options++; 3293b00dc3adSHugh Dickins if (!isdigit(*options)) { 3294b00dc3adSHugh Dickins options[-1] = '\0'; 3295b00dc3adSHugh Dickins break; 3296b00dc3adSHugh Dickins } 3297b00dc3adSHugh Dickins } 32981da177e4SLinus Torvalds if (!*this_char) 32991da177e4SLinus Torvalds continue; 33001da177e4SLinus Torvalds if ((value = strchr(this_char,'=')) != NULL) { 33011da177e4SLinus Torvalds *value++ = 0; 33021da177e4SLinus Torvalds } else { 33031170532bSJoe Perches pr_err("tmpfs: No value for mount option '%s'\n", 33041da177e4SLinus Torvalds this_char); 330549cd0a5cSGreg Thelen goto error; 33061da177e4SLinus Torvalds } 33071da177e4SLinus Torvalds 33081da177e4SLinus Torvalds if (!strcmp(this_char,"size")) { 33091da177e4SLinus Torvalds unsigned long long size; 33101da177e4SLinus Torvalds size = memparse(value,&rest); 33111da177e4SLinus Torvalds if (*rest == '%') { 33121da177e4SLinus Torvalds size <<= PAGE_SHIFT; 33131da177e4SLinus Torvalds size *= totalram_pages; 33141da177e4SLinus Torvalds do_div(size, 100); 33151da177e4SLinus Torvalds rest++; 33161da177e4SLinus Torvalds } 33171da177e4SLinus Torvalds if (*rest) 33181da177e4SLinus Torvalds goto bad_val; 3319680d794bSakpm@linux-foundation.org sbinfo->max_blocks = 332009cbfeafSKirill A. Shutemov DIV_ROUND_UP(size, PAGE_SIZE); 33211da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_blocks")) { 3322680d794bSakpm@linux-foundation.org sbinfo->max_blocks = memparse(value, &rest); 33231da177e4SLinus Torvalds if (*rest) 33241da177e4SLinus Torvalds goto bad_val; 33251da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_inodes")) { 3326680d794bSakpm@linux-foundation.org sbinfo->max_inodes = memparse(value, &rest); 33271da177e4SLinus Torvalds if (*rest) 33281da177e4SLinus Torvalds goto bad_val; 33291da177e4SLinus Torvalds } else if (!strcmp(this_char,"mode")) { 3330680d794bSakpm@linux-foundation.org if (remount) 33311da177e4SLinus Torvalds continue; 3332680d794bSakpm@linux-foundation.org sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 33331da177e4SLinus Torvalds if (*rest) 33341da177e4SLinus Torvalds goto bad_val; 33351da177e4SLinus Torvalds } else if (!strcmp(this_char,"uid")) { 3336680d794bSakpm@linux-foundation.org if (remount) 33371da177e4SLinus Torvalds continue; 33388751e039SEric W. Biederman uid = simple_strtoul(value, &rest, 0); 33391da177e4SLinus Torvalds if (*rest) 33401da177e4SLinus Torvalds goto bad_val; 33418751e039SEric W. Biederman sbinfo->uid = make_kuid(current_user_ns(), uid); 33428751e039SEric W. Biederman if (!uid_valid(sbinfo->uid)) 33438751e039SEric W. Biederman goto bad_val; 33441da177e4SLinus Torvalds } else if (!strcmp(this_char,"gid")) { 3345680d794bSakpm@linux-foundation.org if (remount) 33461da177e4SLinus Torvalds continue; 33478751e039SEric W. Biederman gid = simple_strtoul(value, &rest, 0); 33481da177e4SLinus Torvalds if (*rest) 33491da177e4SLinus Torvalds goto bad_val; 33508751e039SEric W. Biederman sbinfo->gid = make_kgid(current_user_ns(), gid); 33518751e039SEric W. Biederman if (!gid_valid(sbinfo->gid)) 33528751e039SEric W. Biederman goto bad_val; 3353e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 33545a6e75f8SKirill A. Shutemov } else if (!strcmp(this_char, "huge")) { 33555a6e75f8SKirill A. Shutemov int huge; 33565a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(value); 33575a6e75f8SKirill A. 
Shutemov if (huge < 0) 33585a6e75f8SKirill A. Shutemov goto bad_val; 33595a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 33605a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER) 33615a6e75f8SKirill A. Shutemov goto bad_val; 33625a6e75f8SKirill A. Shutemov sbinfo->huge = huge; 33635a6e75f8SKirill A. Shutemov #endif 33645a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA 33657339ff83SRobin Holt } else if (!strcmp(this_char,"mpol")) { 336649cd0a5cSGreg Thelen mpol_put(mpol); 336749cd0a5cSGreg Thelen mpol = NULL; 336849cd0a5cSGreg Thelen if (mpol_parse_str(value, &mpol)) 33697339ff83SRobin Holt goto bad_val; 33705a6e75f8SKirill A. Shutemov #endif 33711da177e4SLinus Torvalds } else { 33721170532bSJoe Perches pr_err("tmpfs: Bad mount option %s\n", this_char); 337349cd0a5cSGreg Thelen goto error; 33741da177e4SLinus Torvalds } 33751da177e4SLinus Torvalds } 337649cd0a5cSGreg Thelen sbinfo->mpol = mpol; 33771da177e4SLinus Torvalds return 0; 33781da177e4SLinus Torvalds 33791da177e4SLinus Torvalds bad_val: 33801170532bSJoe Perches pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", 33811da177e4SLinus Torvalds value, this_char); 338249cd0a5cSGreg Thelen error: 338349cd0a5cSGreg Thelen mpol_put(mpol); 33841da177e4SLinus Torvalds return 1; 33851da177e4SLinus Torvalds 33861da177e4SLinus Torvalds } 33871da177e4SLinus Torvalds 33881da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 33891da177e4SLinus Torvalds { 33901da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3391680d794bSakpm@linux-foundation.org struct shmem_sb_info config = *sbinfo; 33920edd73b3SHugh Dickins unsigned long inodes; 33930edd73b3SHugh Dickins int error = -EINVAL; 33941da177e4SLinus Torvalds 33955f00110fSGreg Thelen config.mpol = NULL; 3396680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, &config, true)) 33970edd73b3SHugh Dickins return error; 33980edd73b3SHugh Dickins 33990edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 34000edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 34017e496299STim Chen if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 34020edd73b3SHugh Dickins goto out; 3403680d794bSakpm@linux-foundation.org if (config.max_inodes < inodes) 34040edd73b3SHugh Dickins goto out; 34050edd73b3SHugh Dickins /* 340654af6042SHugh Dickins * Those tests disallow limited->unlimited while any are in use; 34070edd73b3SHugh Dickins * but we must separately disallow unlimited->limited, because 34080edd73b3SHugh Dickins * in that case we have no record of how much is already in use. 34090edd73b3SHugh Dickins */ 3410680d794bSakpm@linux-foundation.org if (config.max_blocks && !sbinfo->max_blocks) 34110edd73b3SHugh Dickins goto out; 3412680d794bSakpm@linux-foundation.org if (config.max_inodes && !sbinfo->max_inodes) 34130edd73b3SHugh Dickins goto out; 34140edd73b3SHugh Dickins 34150edd73b3SHugh Dickins error = 0; 34165a6e75f8SKirill A. Shutemov sbinfo->huge = config.huge; 3417680d794bSakpm@linux-foundation.org sbinfo->max_blocks = config.max_blocks; 3418680d794bSakpm@linux-foundation.org sbinfo->max_inodes = config.max_inodes; 3419680d794bSakpm@linux-foundation.org sbinfo->free_inodes = config.max_inodes - inodes; 342071fe804bSLee Schermerhorn 34215f00110fSGreg Thelen /* 34225f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 
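 *
 * For example, remounting with "mpol=interleave" replaces the policy
 * (sbinfo takes over config.mpol's initial reference, dropped again on
 * the next change or in shmem_put_super()), while a remount that
 * passes no mpol= option keeps the old policy.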
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (0777 | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}

#endif /* CONFIG_TMPFS */
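/*
 * Example (illustrative): with the non-default options shown above, the
 * corresponding /proc/mounts line produced through shmem_show_options()
 * would look like
 *
 *	tmpfs /mnt tmpfs rw,relatime,size=1048576k,nr_inodes=1000000,mode=700 0 0
 *
 * Options left at their defaults are suppressed entirely.
 */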
static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = 0777 | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC;
#else
	sb->s_flags |= SB_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}
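/*
 * Worked example of the default limits above (figures assumed for
 * illustration): on a machine with 4GiB of RAM and 4KiB pages,
 * shmem_default_max_blocks() comes to totalram_pages / 2 = 524288
 * blocks, i.e. a 2GiB instance, and shmem_default_max_inodes() caps
 * inodes at roughly one per page of lowmem. A SB_KERNMOUNT instance
 * (the internal shm_mnt) takes the else branch and has no limits.
 */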
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
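/*
 * Added commentary: shmem_destroy_inode() above defers the actual free
 * to shmem_destroy_callback() via call_rcu(), so that lockless walkers
 * still dereferencing the inode under rcu_read_lock() cannot touch
 * freed memory; the kfree() of a short symlink's i_link must wait for
 * the same grace period, because RCU-walk path lookup may read i_link
 * without taking any locks.
 */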
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};
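/*
 * Example of how these tables cooperate (sketch of the call path, not
 * code in this file): a plain write(2) on a tmpfs file enters
 * generic_file_write_iter() via shmem_file_operations, which calls back
 * into shmem_aops one page at a time:
 *
 *	write(fd, buf, len)
 *	  -> vfs_write() -> generic_file_write_iter()
 *	       -> shmem_write_begin()	allocate and lock the page
 *	       -> copy from user	fill it from the user buffer
 *	       -> shmem_write_end()	mark dirty and unlock
 */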
static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	/* If rootfs called this, don't re-init */
	if (shmem_inode_cachep)
		return 0;

	shmem_init_inodecache();

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = 0; /* just in case it was patched */
#endif
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
	return error;
}
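/*
 * Usage note (an assumption based on callers outside this file): the
 * internal shm_mnt created by kern_mount() above is the mount that
 * backs shmem_file_setup() and friends below, and hence SysV shared
 * memory and memfds; being SB_KERNMOUNT it bypasses the size and inode
 * limits that shmem_fill_super() applies to user mounts.
 */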
#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int i, count;

	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";

		count += sprintf(buf + count, fmt,
				shmem_format_huge(values[i]));
	}
	buf[count - 1] = '\n';
	return count;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
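/*
 * Example (illustrative shell transcript) for the sysfs knob defined
 * above, which lives under the transparent_hugepage directory:
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	always within_size advise [never] deny force
 *	$ echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * As shmem_enabled_store() shows, writing a value both sets shmem_huge
 * and, for anything above SHMEM_HUGE_DENY, retroactively updates the
 * internal mount's sbinfo->huge.
 */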
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
				i_size >> PAGE_SHIFT >= off)
			return true;
		/* fall through */
	case SHMEM_HUGE_ADVISE:
		/* TODO: implement fadvise() hints */
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		VM_BUG_ON(1);
		return false;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
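/*
 * Worked example for the SHMEM_HUGE_WITHIN_SIZE case above (figures
 * assumed: x86-64, 4KiB pages, 2MiB PMD huge pages, mapping at
 * vm_pgoff 0): a file with i_size = 3MiB gives off = round_up(0, 512)
 * = 0 and i_size >> PAGE_SHIFT = 768, so 3MiB >= 2MiB and 768 >= 0
 * both hold and huge pages are used. With i_size = 1MiB the size test
 * fails and control falls through to the ADVISE case, so huge pages
 * are used only if the mapping has VM_HUGEPAGE set via madvise().
 */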
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by its
 * complexity. On systems without swap this code should be effectively
 * equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */
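/*
 * Added commentary: with CONFIG_SHMEM=n, the #defines above make the
 * common code below fall back to ramfs transparently: files come from
 * ramfs_get_inode(), use ramfs_file_operations, and no swap accounting
 * is performed, matching the tiny-shmem description above.
 */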
/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
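/*
 * Example (hypothetical caller, names illustrative): a driver wanting
 * an unlinked, swappable 16MiB buffer can do
 *
 *	struct file *filp = shmem_file_setup("my-buffer", SZ_16M, 0);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);		releases the file and its pages
 *
 * shmem_kernel_file_setup() is identical but passes S_PRIVATE, making
 * the inode exempt from LSM checks as its comment explains.
 */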
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}
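/*
 * Example (illustrative): this is the path taken by a userspace
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0)
 *
 * do_mmap_pgoff() calls shmem_zero_setup() for shared anonymous
 * mappings, so the region ends up backed by an unlinked tmpfs file
 * (named "dev/zero") rather than by ordinary anonymous memory.
 */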
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method, which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  gfp, NULL, NULL, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
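/*
 * Example (sketch of a GPU-driver-style caller, names illustrative):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * With __GFP_NORETRY | __GFP_NOWARN, an allocation failure here returns
 * an error to the driver, which can then shrink its own caches and retry
 * instead of invoking the OOM killer, as the comment above describes.
 */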