/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
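/*
 * Illustrative arithmetic for the two macros above (assuming 4K pages,
 * not a definition from this file): PAGE_ALIGN(5000) == 8192, so
 * VM_ACCT(5000) == 8192 >> 12 == 2 pages are charged for a 5000-byte
 * object; and BLOCKS_PER_PAGE == 4096/512 == 8, since i_blocks is
 * counted in conventional 512-byte blocks.
 */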
/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
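/*
 * Worked example for shmem_reacct_size() (illustrative, assuming 4K
 * pages): growing a !VM_NORESERVE object from 5000 to 9000 bytes gives
 * VM_ACCT(9000) == 3 against VM_ACCT(5000) == 2, so one extra page is
 * charged to overcommit accounting; shrinking back to 5000 bytes
 * returns that same single page via vm_unacct_memory().
 */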
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
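/*
 * Illustrative numbers for the max_blocks check above (assumed, not
 * from this file): with max_blocks == 1024 and used_blocks == 1020, a
 * request for pages == 8 compares used_blocks against 1024 - 8 == 1016;
 * 1020 > 1016, so the request fails and any overcommit charge taken by
 * shmem_acct_block() is rolled back before returning false.
 */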
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
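/*
 * Worked example for the recalculation above (illustrative values):
 * with info->alloced == 100 pages, info->swapped == 10 and
 * i_mapping->nrpages == 80 after the mm dropped some clean hole pages,
 * freed == 100 - 10 - 80 == 10; those 10 pages are subtracted from
 * i_blocks and returned to the block accounting.
 */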
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	struct radix_tree_node *node;
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
	if (!item)
		return -ENOENT;
	if (item != expected)
		return -ENOENT;
	__radix_tree_replace(&mapping->page_tree, node, pslot,
			     replacement, NULL);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
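/*
 * Usage sketch (illustrative, not part of this file): the per-mount
 * values map to the huge= mount option, while the two special values
 * are only reachable through sysfs, e.g.
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */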
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
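/*
 * Worked example for the shrinker's "anything to gain" test (assuming
 * 4K pages and 2MB HPAGE_PMD_SIZE): an inode with i_size == 5MB rounds
 * to 5MB by PAGE_SIZE but to 6MB by HPAGE_PMD_SIZE, so splitting its
 * tail THP can free ~1MB and the inode stays on the split list; at
 * i_size == 4MB both round to 4MB, so the inode is dropped instead.
 */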
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
						 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}
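/*
 * Illustrative scale of the THP path above (assuming 4K base pages and
 * 2MB huge pages, so HPAGE_PMD_NR == 512): one call inserts 512
 * consecutive radix-tree slots, takes 512 page references up front,
 * and bumps NR_FILE_PAGES and NR_SHMEM by 512 in one go.
 */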
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
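/*
 * Illustrative result (assuming 4K pages): if three slots in
 * [start, end) hold exceptional swap entries, the function above
 * returns 3 << PAGE_SHIFT == 12288 bytes swapped out.
 */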
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due to 'start' in
				 * middle of THP: don't need to look on these
				 * pages again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}
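/*
 * Worked example for shmem_undo_range() (illustrative, assuming 4K
 * pages): punching the byte range [1024, 10240) (lstart == 1024,
 * lend == 10239) gives start == 1, end == 2, partial_start == 1024 and
 * partial_end == 2048, so page 1 is removed outright, while pages 0
 * and 2 survive with bytes [1024, 4096) and [0, 2048) respectively
 * zeroed and marked dirty.
 */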
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);
	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrink_list in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}
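/*
 * Illustrative interaction with memfd seals (userspace view, not part
 * of this file): after fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK) on a
 * memfd, an ftruncate() below the current size reaches shmem_setattr()
 * above and fails with -EPERM, while growing the file still succeeds
 * unless F_SEAL_GROW is also set.
 */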
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long found = -1;
	unsigned int checked = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		if (*slot == item) {
			found = iter.index;
			break;
		}
		checked++;
		if ((checked % 4096) != 0)
			continue;
		slot = radix_tree_iter_resume(slot, &iter);
		cond_resched_rcu();
	}

	rcu_read_unlock();
	return found;
}
111546f65ec1SHugh Dickins */ 111641ffe5d5SHugh Dickins static int shmem_unuse_inode(struct shmem_inode_info *info, 1117bde05d1cSHugh Dickins swp_entry_t swap, struct page **pagep) 11181da177e4SLinus Torvalds { 1119285b2c4fSHugh Dickins struct address_space *mapping = info->vfs_inode.i_mapping; 112046f65ec1SHugh Dickins void *radswap; 112141ffe5d5SHugh Dickins pgoff_t index; 1122bde05d1cSHugh Dickins gfp_t gfp; 1123bde05d1cSHugh Dickins int error = 0; 11241da177e4SLinus Torvalds 112546f65ec1SHugh Dickins radswap = swp_to_radix_entry(swap); 1126478922e2SMatthew Wilcox index = find_swap_entry(&mapping->page_tree, radswap); 112746f65ec1SHugh Dickins if (index == -1) 112800501b53SJohannes Weiner return -EAGAIN; /* tell shmem_unuse we found nothing */ 11292e0e26c7SHugh Dickins 11301b1b32f2SHugh Dickins /* 11311b1b32f2SHugh Dickins * Move _head_ to start search for next from here. 11321f895f75SAl Viro * But be careful: shmem_evict_inode checks list_empty without taking 11331b1b32f2SHugh Dickins * mutex, and there's an instant in list_move_tail when info->swaplist 1134285b2c4fSHugh Dickins * would appear empty, if it were the only one on shmem_swaplist. 11351b1b32f2SHugh Dickins */ 11361b1b32f2SHugh Dickins if (shmem_swaplist.next != &info->swaplist) 11372e0e26c7SHugh Dickins list_move_tail(&shmem_swaplist, &info->swaplist); 11382e0e26c7SHugh Dickins 1139bde05d1cSHugh Dickins gfp = mapping_gfp_mask(mapping); 1140bde05d1cSHugh Dickins if (shmem_should_replace_page(*pagep, gfp)) { 1141bde05d1cSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1142bde05d1cSHugh Dickins error = shmem_replace_page(pagep, gfp, info, index); 1143bde05d1cSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1144bde05d1cSHugh Dickins /* 1145bde05d1cSHugh Dickins * We needed to drop mutex to make that restrictive page 11460142ef6cSHugh Dickins * allocation, but the inode might have been freed while we 11470142ef6cSHugh Dickins * dropped it: although a racing shmem_evict_inode() cannot 11480142ef6cSHugh Dickins * complete without emptying the radix_tree, our page lock 11490142ef6cSHugh Dickins * on this swapcache page is not enough to prevent that - 11500142ef6cSHugh Dickins * free_swap_and_cache() of our swap entry will only 11510142ef6cSHugh Dickins * trylock_page(), removing swap from radix_tree whatever. 11520142ef6cSHugh Dickins * 11530142ef6cSHugh Dickins * We must not proceed to shmem_add_to_page_cache() if the 11540142ef6cSHugh Dickins * inode has been freed, but of course we cannot rely on 11550142ef6cSHugh Dickins * inode or mapping or info to check that. However, we can 11560142ef6cSHugh Dickins * safely check if our swap entry is still in use (and here 11570142ef6cSHugh Dickins * it can't have got reused for another page): if it's still 11580142ef6cSHugh Dickins * in use, then the inode cannot have been freed yet, and we 11590142ef6cSHugh Dickins * can safely proceed (if it's no longer in use, that tells 11600142ef6cSHugh Dickins * nothing about the inode, but we don't need to unuse swap). 1161bde05d1cSHugh Dickins */ 1162bde05d1cSHugh Dickins if (!page_swapcount(*pagep)) 1163bde05d1cSHugh Dickins error = -ENOENT; 1164bde05d1cSHugh Dickins } 1165bde05d1cSHugh Dickins 1166d13d1443SKAMEZAWA Hiroyuki /* 1167778dd893SHugh Dickins * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 1168778dd893SHugh Dickins * but also to hold up shmem_evict_inode(): so inode cannot be freed 1169778dd893SHugh Dickins * beneath us (pagelock doesn't help until the page is in pagecache). 
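	 * (Hence shmem_add_to_page_cache() below is still called under the
	 * mutex: only once the page is in pagecache does its page lock pin
	 * the mapping.)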
1170d13d1443SKAMEZAWA Hiroyuki */ 1171bde05d1cSHugh Dickins if (!error) 1172bde05d1cSHugh Dickins error = shmem_add_to_page_cache(*pagep, mapping, index, 1173fed400a1SWang Sheng-Hui radswap); 117448f170fbSHugh Dickins if (error != -ENOMEM) { 117546f65ec1SHugh Dickins /* 117646f65ec1SHugh Dickins * Truncation and eviction use free_swap_and_cache(), which 117746f65ec1SHugh Dickins * only does trylock page: if we raced, best clean up here. 117846f65ec1SHugh Dickins */ 1179bde05d1cSHugh Dickins delete_from_swap_cache(*pagep); 1180bde05d1cSHugh Dickins set_page_dirty(*pagep); 118146f65ec1SHugh Dickins if (!error) { 11824595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1183285b2c4fSHugh Dickins info->swapped--; 11844595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 118541ffe5d5SHugh Dickins swap_free(swap); 118646f65ec1SHugh Dickins } 11871da177e4SLinus Torvalds } 11882e0e26c7SHugh Dickins return error; 11891da177e4SLinus Torvalds } 11901da177e4SLinus Torvalds 11911da177e4SLinus Torvalds /* 119246f65ec1SHugh Dickins * Search through swapped inodes to find and replace swap by page. 11931da177e4SLinus Torvalds */ 119441ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 11951da177e4SLinus Torvalds { 119641ffe5d5SHugh Dickins struct list_head *this, *next; 11971da177e4SLinus Torvalds struct shmem_inode_info *info; 119800501b53SJohannes Weiner struct mem_cgroup *memcg; 1199bde05d1cSHugh Dickins int error = 0; 1200bde05d1cSHugh Dickins 1201bde05d1cSHugh Dickins /* 1202bde05d1cSHugh Dickins * There's a faint possibility that swap page was replaced before 12030142ef6cSHugh Dickins * caller locked it: caller will come back later with the right page. 1204bde05d1cSHugh Dickins */ 12050142ef6cSHugh Dickins if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 1206bde05d1cSHugh Dickins goto out; 1207778dd893SHugh Dickins 1208778dd893SHugh Dickins /* 1209778dd893SHugh Dickins * Charge page using GFP_KERNEL while we can wait, before taking 1210778dd893SHugh Dickins * the shmem_swaplist_mutex which might hold up shmem_writepage(). 1211778dd893SHugh Dickins * Charged back to the user (not to caller) when swap account is used. 1212778dd893SHugh Dickins */ 1213f627c2f5SKirill A. Shutemov error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg, 1214f627c2f5SKirill A. Shutemov false); 1215778dd893SHugh Dickins if (error) 1216778dd893SHugh Dickins goto out; 121746f65ec1SHugh Dickins /* No radix_tree_preload: swap entry keeps a place for page in tree */ 121800501b53SJohannes Weiner error = -EAGAIN; 12191da177e4SLinus Torvalds 1220cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 122141ffe5d5SHugh Dickins list_for_each_safe(this, next, &shmem_swaplist) { 122241ffe5d5SHugh Dickins info = list_entry(this, struct shmem_inode_info, swaplist); 1223285b2c4fSHugh Dickins if (info->swapped) 122400501b53SJohannes Weiner error = shmem_unuse_inode(info, swap, &page); 12256922c0c7SHugh Dickins else 12266922c0c7SHugh Dickins list_del_init(&info->swaplist); 1227cb5f7b9aSHugh Dickins cond_resched(); 122800501b53SJohannes Weiner if (error != -EAGAIN) 1229778dd893SHugh Dickins break; 123000501b53SJohannes Weiner /* found nothing in this: move on to search the next */ 12311da177e4SLinus Torvalds } 1232cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1233778dd893SHugh Dickins 123400501b53SJohannes Weiner if (error) { 123500501b53SJohannes Weiner if (error != -ENOMEM) 123600501b53SJohannes Weiner error = 0; 1237f627c2f5SKirill A. 
Shutemov mem_cgroup_cancel_charge(page, memcg, false); 123800501b53SJohannes Weiner } else 1239f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 1240778dd893SHugh Dickins out: 1241aaa46865SHugh Dickins unlock_page(page); 124209cbfeafSKirill A. Shutemov put_page(page); 1243778dd893SHugh Dickins return error; 12441da177e4SLinus Torvalds } 12451da177e4SLinus Torvalds 12461da177e4SLinus Torvalds /* 12471da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 12481da177e4SLinus Torvalds */ 12491da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 12501da177e4SLinus Torvalds { 12511da177e4SLinus Torvalds struct shmem_inode_info *info; 12521da177e4SLinus Torvalds struct address_space *mapping; 12531da177e4SLinus Torvalds struct inode *inode; 12546922c0c7SHugh Dickins swp_entry_t swap; 12556922c0c7SHugh Dickins pgoff_t index; 12561da177e4SLinus Torvalds 1257800d8c63SKirill A. Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 12581da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 12591da177e4SLinus Torvalds mapping = page->mapping; 12601da177e4SLinus Torvalds index = page->index; 12611da177e4SLinus Torvalds inode = mapping->host; 12621da177e4SLinus Torvalds info = SHMEM_I(inode); 12631da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 12641da177e4SLinus Torvalds goto redirty; 1265d9fe526aSHugh Dickins if (!total_swap_pages) 12661da177e4SLinus Torvalds goto redirty; 12671da177e4SLinus Torvalds 1268d9fe526aSHugh Dickins /* 126997b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 127097b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 127197b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 127297b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 127397b713baSChristoph Hellwig * threads or sync. 1274d9fe526aSHugh Dickins */ 127548f170fbSHugh Dickins if (!wbc->for_reclaim) { 127648f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 127748f170fbSHugh Dickins goto redirty; 127848f170fbSHugh Dickins } 12791635f6a7SHugh Dickins 12801635f6a7SHugh Dickins /* 12811635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 12821635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 12831635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 12841aac1400SHugh Dickins * 12851aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 12861aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 12871aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 12881aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 12891aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 
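	 * (shmem_fallocate() gives up with -ENOMEM once nr_unswapped climbs
	 * past the number of pages it has fallocated so far.)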
12901635f6a7SHugh Dickins */ 12911635f6a7SHugh Dickins if (!PageUptodate(page)) { 12921aac1400SHugh Dickins if (inode->i_private) { 12931aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 12941aac1400SHugh Dickins spin_lock(&inode->i_lock); 12951aac1400SHugh Dickins shmem_falloc = inode->i_private; 12961aac1400SHugh Dickins if (shmem_falloc && 12978e205f77SHugh Dickins !shmem_falloc->waitq && 12981aac1400SHugh Dickins index >= shmem_falloc->start && 12991aac1400SHugh Dickins index < shmem_falloc->next) 13001aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 13011aac1400SHugh Dickins else 13021aac1400SHugh Dickins shmem_falloc = NULL; 13031aac1400SHugh Dickins spin_unlock(&inode->i_lock); 13041aac1400SHugh Dickins if (shmem_falloc) 13051aac1400SHugh Dickins goto redirty; 13061aac1400SHugh Dickins } 13071635f6a7SHugh Dickins clear_highpage(page); 13081635f6a7SHugh Dickins flush_dcache_page(page); 13091635f6a7SHugh Dickins SetPageUptodate(page); 13101635f6a7SHugh Dickins } 13111635f6a7SHugh Dickins 131238d8b4e6SHuang Ying swap = get_swap_page(page); 131348f170fbSHugh Dickins if (!swap.val) 131448f170fbSHugh Dickins goto redirty; 1315d9fe526aSHugh Dickins 131637e84351SVladimir Davydov if (mem_cgroup_try_charge_swap(page, swap)) 131737e84351SVladimir Davydov goto free_swap; 131837e84351SVladimir Davydov 1319b1dea800SHugh Dickins /* 1320b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 13216922c0c7SHugh Dickins * if it's not already there. Do it now before the page is 13226922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1323b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 13246922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 13256922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1326b1dea800SHugh Dickins */ 1327b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 132805bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 132905bf86b4SHugh Dickins list_add_tail(&info->swaplist, &shmem_swaplist); 1330b1dea800SHugh Dickins 133148f170fbSHugh Dickins if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 13324595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1333267a4c76SHugh Dickins shmem_recalc_inode(inode); 1334267a4c76SHugh Dickins info->swapped++; 13354595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1336267a4c76SHugh Dickins 1337aaa46865SHugh Dickins swap_shmem_alloc(swap); 13386922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 13396922c0c7SHugh Dickins 13406922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1341d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 13429fab5619SHugh Dickins swap_writepage(page, wbc); 13431da177e4SLinus Torvalds return 0; 13441da177e4SLinus Torvalds } 13451da177e4SLinus Torvalds 13466922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 134737e84351SVladimir Davydov free_swap: 134875f6d6d2SMinchan Kim put_swap_page(page, swap); 13491da177e4SLinus Torvalds redirty: 13501da177e4SLinus Torvalds set_page_dirty(page); 1351d9fe526aSHugh Dickins if (wbc->for_reclaim) 1352d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1353d9fe526aSHugh Dickins unlock_page(page); 1354d9fe526aSHugh Dickins return 0; 13551da177e4SLinus Torvalds } 13561da177e4SLinus Torvalds 135775edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 135871fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1359680d794bSakpm@linux-foundation.org { 1360680d794bSakpm@linux-foundation.org char buffer[64]; 1361680d794bSakpm@linux-foundation.org 136271fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1363095f1fc4SLee Schermerhorn return; /* show nothing */ 1364095f1fc4SLee Schermerhorn 1365a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1366095f1fc4SLee Schermerhorn 1367095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1368680d794bSakpm@linux-foundation.org } 136971fe804bSLee Schermerhorn 137071fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 137171fe804bSLee Schermerhorn { 137271fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 137371fe804bSLee Schermerhorn if (sbinfo->mpol) { 137471fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 137571fe804bSLee Schermerhorn mpol = sbinfo->mpol; 137671fe804bSLee Schermerhorn mpol_get(mpol); 137771fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 137871fe804bSLee Schermerhorn } 137971fe804bSLee Schermerhorn return mpol; 138071fe804bSLee Schermerhorn } 138175edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 138275edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 138375edd345SHugh Dickins { 138475edd345SHugh Dickins } 138575edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 138675edd345SHugh Dickins { 138775edd345SHugh Dickins return NULL; 138875edd345SHugh Dickins } 138975edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 139075edd345SHugh Dickins #ifndef CONFIG_NUMA 139175edd345SHugh Dickins #define vm_policy vm_private_data 139275edd345SHugh Dickins #endif 1393680d794bSakpm@linux-foundation.org 1394800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1395800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1396800d8c63SKirill A. Shutemov { 1397800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 1398800d8c63SKirill A. Shutemov vma->vm_start = 0; 1399800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1400800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1401800d8c63SKirill A. 
Shutemov vma->vm_ops = NULL; 1402800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1403800d8c63SKirill A. Shutemov } 1404800d8c63SKirill A. Shutemov 1405800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1406800d8c63SKirill A. Shutemov { 1407800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1408800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1409800d8c63SKirill A. Shutemov } 1410800d8c63SKirill A. Shutemov 141141ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 141241ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 14131da177e4SLinus Torvalds { 14141da177e4SLinus Torvalds struct vm_area_struct pvma; 141518a2f371SMel Gorman struct page *page; 14161da177e4SLinus Torvalds 1417800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 141818a2f371SMel Gorman page = swapin_readahead(swap, gfp, &pvma, 0); 1419800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 142018a2f371SMel Gorman 1421800d8c63SKirill A. Shutemov return page; 1422800d8c63SKirill A. Shutemov } 142318a2f371SMel Gorman 1424800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1425800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1426800d8c63SKirill A. Shutemov { 1427800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 1428800d8c63SKirill A. Shutemov struct inode *inode = &info->vfs_inode; 1429800d8c63SKirill A. Shutemov struct address_space *mapping = inode->i_mapping; 14304620a06eSGeert Uytterhoeven pgoff_t idx, hindex; 1431800d8c63SKirill A. Shutemov void __rcu **results; 1432800d8c63SKirill A. Shutemov struct page *page; 1433800d8c63SKirill A. Shutemov 1434e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1435800d8c63SKirill A. Shutemov return NULL; 1436800d8c63SKirill A. Shutemov 14374620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 1438800d8c63SKirill A. Shutemov rcu_read_lock(); 1439800d8c63SKirill A. Shutemov if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, 1440800d8c63SKirill A. Shutemov hindex, 1) && idx < hindex + HPAGE_PMD_NR) { 1441800d8c63SKirill A. Shutemov rcu_read_unlock(); 1442800d8c63SKirill A. Shutemov return NULL; 1443800d8c63SKirill A. Shutemov } 1444800d8c63SKirill A. Shutemov rcu_read_unlock(); 1445800d8c63SKirill A. Shutemov 1446800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1447800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1448800d8c63SKirill A. Shutemov HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1449800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1450800d8c63SKirill A. Shutemov if (page) 1451800d8c63SKirill A. Shutemov prep_transhuge_page(page); 145218a2f371SMel Gorman return page; 145318a2f371SMel Gorman } 145418a2f371SMel Gorman 145518a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 145618a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 145718a2f371SMel Gorman { 145818a2f371SMel Gorman struct vm_area_struct pvma; 145918a2f371SMel Gorman struct page *page; 146018a2f371SMel Gorman 1461800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1462800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1463800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 146418a2f371SMel Gorman 1465800d8c63SKirill A. 
Shutemov return page; 1466800d8c63SKirill A. Shutemov } 1467800d8c63SKirill A. Shutemov 1468800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 14690f079694SMike Rapoport struct inode *inode, 1470800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1471800d8c63SKirill A. Shutemov { 14720f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 1473800d8c63SKirill A. Shutemov struct page *page; 1474800d8c63SKirill A. Shutemov int nr; 1475800d8c63SKirill A. Shutemov int err = -ENOSPC; 1476800d8c63SKirill A. Shutemov 1477e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1478800d8c63SKirill A. Shutemov huge = false; 1479800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1480800d8c63SKirill A. Shutemov 14810f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1482800d8c63SKirill A. Shutemov goto failed; 1483800d8c63SKirill A. Shutemov 1484800d8c63SKirill A. Shutemov if (huge) 1485800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1486800d8c63SKirill A. Shutemov else 1487800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 148875edd345SHugh Dickins if (page) { 148975edd345SHugh Dickins __SetPageLocked(page); 149075edd345SHugh Dickins __SetPageSwapBacked(page); 1491800d8c63SKirill A. Shutemov return page; 149275edd345SHugh Dickins } 149318a2f371SMel Gorman 1494800d8c63SKirill A. Shutemov err = -ENOMEM; 14950f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1496800d8c63SKirill A. Shutemov failed: 1497800d8c63SKirill A. Shutemov return ERR_PTR(err); 14981da177e4SLinus Torvalds } 149971fe804bSLee Schermerhorn 15001da177e4SLinus Torvalds /* 1501bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1502bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1503bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1504bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1505bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1506bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1507bde05d1cSHugh Dickins * 1508bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1509bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1510bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
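 * For example, under a GFP_DMA32-limited mapping such as gma500's, a
 * swapcache page that landed in ZONE_NORMAL gives page_zonenum(page) >
 * gfp_zone(gfp) below, and must first be copied into a suitable page.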
1511bde05d1cSHugh Dickins */ 1512bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1513bde05d1cSHugh Dickins { 1514bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1515bde05d1cSHugh Dickins } 1516bde05d1cSHugh Dickins 1517bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1518bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1519bde05d1cSHugh Dickins { 1520bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1521bde05d1cSHugh Dickins struct address_space *swap_mapping; 1522bde05d1cSHugh Dickins pgoff_t swap_index; 1523bde05d1cSHugh Dickins int error; 1524bde05d1cSHugh Dickins 1525bde05d1cSHugh Dickins oldpage = *pagep; 1526bde05d1cSHugh Dickins swap_index = page_private(oldpage); 1527bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1528bde05d1cSHugh Dickins 1529bde05d1cSHugh Dickins /* 1530bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1531bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1532bde05d1cSHugh Dickins */ 1533bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1534bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1535bde05d1cSHugh Dickins if (!newpage) 1536bde05d1cSHugh Dickins return -ENOMEM; 1537bde05d1cSHugh Dickins 153809cbfeafSKirill A. Shutemov get_page(newpage); 1539bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 15400142ef6cSHugh Dickins flush_dcache_page(newpage); 1541bde05d1cSHugh Dickins 15429956edf3SHugh Dickins __SetPageLocked(newpage); 15439956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1544bde05d1cSHugh Dickins SetPageUptodate(newpage); 1545bde05d1cSHugh Dickins set_page_private(newpage, swap_index); 1546bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1547bde05d1cSHugh Dickins 1548bde05d1cSHugh Dickins /* 1549bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1550bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1551bde05d1cSHugh Dickins */ 1552bde05d1cSHugh Dickins spin_lock_irq(&swap_mapping->tree_lock); 1553bde05d1cSHugh Dickins error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1554bde05d1cSHugh Dickins newpage); 15550142ef6cSHugh Dickins if (!error) { 155611fb9989SMel Gorman __inc_node_page_state(newpage, NR_FILE_PAGES); 155711fb9989SMel Gorman __dec_node_page_state(oldpage, NR_FILE_PAGES); 15580142ef6cSHugh Dickins } 1559bde05d1cSHugh Dickins spin_unlock_irq(&swap_mapping->tree_lock); 1560bde05d1cSHugh Dickins 15610142ef6cSHugh Dickins if (unlikely(error)) { 15620142ef6cSHugh Dickins /* 15630142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 15640142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 15650142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 15660142ef6cSHugh Dickins */ 15670142ef6cSHugh Dickins oldpage = newpage; 15680142ef6cSHugh Dickins } else { 15696a93ca8fSJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 1570bde05d1cSHugh Dickins lru_cache_add_anon(newpage); 15710142ef6cSHugh Dickins *pagep = newpage; 15720142ef6cSHugh Dickins } 1573bde05d1cSHugh Dickins 1574bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1575bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1576bde05d1cSHugh Dickins 1577bde05d1cSHugh Dickins unlock_page(oldpage); 157809cbfeafSKirill A. Shutemov put_page(oldpage); 157909cbfeafSKirill A. 
Shutemov put_page(oldpage); 15800142ef6cSHugh Dickins return error; 1581bde05d1cSHugh Dickins } 1582bde05d1cSHugh Dickins 1583bde05d1cSHugh Dickins /* 158468da9f05SHugh Dickins * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 15851da177e4SLinus Torvalds * 15861da177e4SLinus Torvalds * If we allocate a new one we do not mark it dirty. That's up to the 15871da177e4SLinus Torvalds * vm. If we swap it in we mark it dirty since we also free the swap 15889e18eb29SAndres Lagar-Cavilla * entry since a page cannot live in both the swap and page cache. 15899e18eb29SAndres Lagar-Cavilla * 15909e18eb29SAndres Lagar-Cavilla * fault_mm and fault_type are only supplied by shmem_fault: 15919e18eb29SAndres Lagar-Cavilla * otherwise they are NULL. 15921da177e4SLinus Torvalds */ 159341ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 15949e18eb29SAndres Lagar-Cavilla struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1595cfda0526SMike Rapoport struct vm_area_struct *vma, struct vm_fault *vmf, int *fault_type) 15961da177e4SLinus Torvalds { 15971da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 159823f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 15991da177e4SLinus Torvalds struct shmem_sb_info *sbinfo; 16009e18eb29SAndres Lagar-Cavilla struct mm_struct *charge_mm; 160100501b53SJohannes Weiner struct mem_cgroup *memcg; 160227ab7006SHugh Dickins struct page *page; 16031da177e4SLinus Torvalds swp_entry_t swap; 1604657e3038SKirill A. Shutemov enum sgp_type sgp_huge = sgp; 1605800d8c63SKirill A. Shutemov pgoff_t hindex = index; 16061da177e4SLinus Torvalds int error; 160754af6042SHugh Dickins int once = 0; 16081635f6a7SHugh Dickins int alloced = 0; 16091da177e4SLinus Torvalds 161009cbfeafSKirill A. Shutemov if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 16111da177e4SLinus Torvalds return -EFBIG; 1612657e3038SKirill A. Shutemov if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1613657e3038SKirill A. Shutemov sgp = SGP_CACHE; 16141da177e4SLinus Torvalds repeat: 161554af6042SHugh Dickins swap.val = 0; 16160cd6144aSJohannes Weiner page = find_lock_entry(mapping, index); 161754af6042SHugh Dickins if (radix_tree_exceptional_entry(page)) { 161854af6042SHugh Dickins swap = radix_to_swp_entry(page); 161954af6042SHugh Dickins page = NULL; 162054af6042SHugh Dickins } 162154af6042SHugh Dickins 162275edd345SHugh Dickins if (sgp <= SGP_CACHE && 162309cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 162454af6042SHugh Dickins error = -EINVAL; 1625267a4c76SHugh Dickins goto unlock; 162654af6042SHugh Dickins } 162754af6042SHugh Dickins 162866d2f4d2SHugh Dickins if (page && sgp == SGP_WRITE) 162966d2f4d2SHugh Dickins mark_page_accessed(page); 163066d2f4d2SHugh Dickins 16311635f6a7SHugh Dickins /* fallocated page? */ 16321635f6a7SHugh Dickins if (page && !PageUptodate(page)) { 16331635f6a7SHugh Dickins if (sgp != SGP_READ) 16341635f6a7SHugh Dickins goto clear; 16351635f6a7SHugh Dickins unlock_page(page); 163609cbfeafSKirill A. Shutemov put_page(page); 16371635f6a7SHugh Dickins page = NULL; 16381635f6a7SHugh Dickins } 163954af6042SHugh Dickins if (page || (sgp == SGP_READ && !swap.val)) { 164054af6042SHugh Dickins *pagep = page; 164154af6042SHugh Dickins return 0; 164227ab7006SHugh Dickins } 164327ab7006SHugh Dickins 1644b409f9fcSHugh Dickins /* 164554af6042SHugh Dickins * Fast cache lookup did not find it: 164654af6042SHugh Dickins * bring it back from swap or allocate. 
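	 * (swap.val != 0 means an exceptional entry was found above: take the
	 * swapin branch; otherwise fall through to the allocation path.)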
1647b409f9fcSHugh Dickins */ 164854af6042SHugh Dickins sbinfo = SHMEM_SB(inode->i_sb); 1649cfda0526SMike Rapoport charge_mm = vma ? vma->vm_mm : current->mm; 165027ab7006SHugh Dickins 16511da177e4SLinus Torvalds if (swap.val) { 16521da177e4SLinus Torvalds /* Look it up and read it in.. */ 1653ec560175SHuang Ying page = lookup_swap_cache(swap, NULL, 0); 165427ab7006SHugh Dickins if (!page) { 16559e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 16569e18eb29SAndres Lagar-Cavilla if (fault_type) { 165768da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 16589e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 16592262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 16609e18eb29SAndres Lagar-Cavilla } 16619e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 166241ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 166327ab7006SHugh Dickins if (!page) { 16641da177e4SLinus Torvalds error = -ENOMEM; 166554af6042SHugh Dickins goto failed; 1666285b2c4fSHugh Dickins } 16671da177e4SLinus Torvalds } 16681da177e4SLinus Torvalds 16691da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 167054af6042SHugh Dickins lock_page(page); 16710142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1672d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1673bde05d1cSHugh Dickins error = -EEXIST; /* try again */ 1674d1899228SHugh Dickins goto unlock; 1675bde05d1cSHugh Dickins } 167627ab7006SHugh Dickins if (!PageUptodate(page)) { 16771da177e4SLinus Torvalds error = -EIO; 167854af6042SHugh Dickins goto failed; 167954af6042SHugh Dickins } 168054af6042SHugh Dickins wait_on_page_writeback(page); 168154af6042SHugh Dickins 1682bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1683bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1684bde05d1cSHugh Dickins if (error) 168554af6042SHugh Dickins goto failed; 16861da177e4SLinus Torvalds } 16871da177e4SLinus Torvalds 16889e18eb29SAndres Lagar-Cavilla error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 1689f627c2f5SKirill A. Shutemov false); 1690d1899228SHugh Dickins if (!error) { 169154af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 1692fed400a1SWang Sheng-Hui swp_to_radix_entry(swap)); 1693215c02bcSHugh Dickins /* 1694215c02bcSHugh Dickins * We already confirmed swap under page lock, and make 1695215c02bcSHugh Dickins * no memory allocation here, so usually no possibility 1696215c02bcSHugh Dickins * of error; but free_swap_and_cache() only trylocks a 1697215c02bcSHugh Dickins * page, so it is just possible that the entry has been 1698215c02bcSHugh Dickins * truncated or holepunched since swap was confirmed. 1699215c02bcSHugh Dickins * shmem_undo_range() will have done some of the 1700215c02bcSHugh Dickins * unaccounting, now delete_from_swap_cache() will do 170193aa7d95SVladimir Davydov * the rest. 1702215c02bcSHugh Dickins * Reset swap.val? No, leave it so "failed" goes back to 1703215c02bcSHugh Dickins * "repeat": reading a hole and writing should succeed. 1704215c02bcSHugh Dickins */ 170500501b53SJohannes Weiner if (error) { 1706f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1707215c02bcSHugh Dickins delete_from_swap_cache(page); 1708d1899228SHugh Dickins } 170900501b53SJohannes Weiner } 171054af6042SHugh Dickins if (error) 171154af6042SHugh Dickins goto failed; 171254af6042SHugh Dickins 1713f627c2f5SKirill A. 
Shutemov 		mem_cgroup_commit_charge(page, memcg, true, false);
171400501b53SJohannes Weiner 
17154595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
171654af6042SHugh Dickins 		info->swapped--;
171754af6042SHugh Dickins 		shmem_recalc_inode(inode);
17184595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
171927ab7006SHugh Dickins 
172066d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
172166d2f4d2SHugh Dickins 			mark_page_accessed(page);
172266d2f4d2SHugh Dickins 
172327ab7006SHugh Dickins 		delete_from_swap_cache(page);
172427ab7006SHugh Dickins 		set_page_dirty(page);
172527ab7006SHugh Dickins 		swap_free(swap);
172627ab7006SHugh Dickins 
172754af6042SHugh Dickins 	} else {
1728cfda0526SMike Rapoport 		if (vma && userfaultfd_missing(vma)) {
1729cfda0526SMike Rapoport 			*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1730cfda0526SMike Rapoport 			return 0;
1731cfda0526SMike Rapoport 		}
1732cfda0526SMike Rapoport 
1733800d8c63SKirill A. Shutemov 		/* shmem_symlink() */
1734800d8c63SKirill A. Shutemov 		if (mapping->a_ops != &shmem_aops)
1735800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1736657e3038SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1737800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1738800d8c63SKirill A. Shutemov 		if (shmem_huge == SHMEM_HUGE_FORCE)
1739800d8c63SKirill A. Shutemov 			goto alloc_huge;
1740800d8c63SKirill A. Shutemov 		switch (sbinfo->huge) {
1741800d8c63SKirill A. Shutemov 			loff_t i_size;
1742800d8c63SKirill A. Shutemov 			pgoff_t off;
1743800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_NEVER:
1744800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
1745800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_WITHIN_SIZE:
1746800d8c63SKirill A. Shutemov 			off = round_up(index, HPAGE_PMD_NR);
1747800d8c63SKirill A. Shutemov 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
1748800d8c63SKirill A. Shutemov 			if (i_size >= HPAGE_PMD_SIZE &&
1749800d8c63SKirill A. Shutemov 			    i_size >> PAGE_SHIFT >= off)
1750800d8c63SKirill A. Shutemov 				goto alloc_huge;
1751800d8c63SKirill A. Shutemov 			/* fallthrough */
1752800d8c63SKirill A. Shutemov 		case SHMEM_HUGE_ADVISE:
1753657e3038SKirill A. Shutemov 			if (sgp_huge == SGP_HUGE)
1754657e3038SKirill A. Shutemov 				goto alloc_huge;
1755657e3038SKirill A. Shutemov 			/* TODO: implement fadvise() hints */
1756800d8c63SKirill A. Shutemov 			goto alloc_nohuge;
175759a16eadSHugh Dickins 		}
17581da177e4SLinus Torvalds 
1759800d8c63SKirill A. Shutemov alloc_huge:
17600f079694SMike Rapoport 		page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1761800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
17620f079694SMike Rapoport alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, inode,
1763800d8c63SKirill A. Shutemov 					index, false);
176454af6042SHugh Dickins 		}
1765800d8c63SKirill A. Shutemov 		if (IS_ERR(page)) {
1766779750d2SKirill A. Shutemov 			int retry = 5;
1767800d8c63SKirill A. Shutemov 			error = PTR_ERR(page);
1768800d8c63SKirill A. Shutemov 			page = NULL;
1769779750d2SKirill A. Shutemov 			if (error != -ENOSPC)
1770779750d2SKirill A. Shutemov 				goto failed;
1771779750d2SKirill A. Shutemov 			/*
1772779750d2SKirill A. Shutemov 			 * Try to reclaim some space by splitting a huge page
1773779750d2SKirill A. Shutemov 			 * beyond i_size on the filesystem.
1774779750d2SKirill A. Shutemov 			 */
1775779750d2SKirill A. Shutemov 			while (retry--) {
1776779750d2SKirill A. Shutemov 				int ret;
1777779750d2SKirill A. Shutemov 				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1778779750d2SKirill A. Shutemov 				if (ret == SHRINK_STOP)
1779779750d2SKirill A. Shutemov 					break;
1780779750d2SKirill A.
Shutemov 				if (ret)
1781779750d2SKirill A. Shutemov 					goto alloc_nohuge;
1782779750d2SKirill A. Shutemov 			}
1783800d8c63SKirill A. Shutemov 			goto failed;
1784800d8c63SKirill A. Shutemov 		}
1785800d8c63SKirill A. Shutemov 
1786800d8c63SKirill A. Shutemov 		if (PageTransHuge(page))
1787800d8c63SKirill A. Shutemov 			hindex = round_down(index, HPAGE_PMD_NR);
1788800d8c63SKirill A. Shutemov 		else
1789800d8c63SKirill A. Shutemov 			hindex = index;
1790800d8c63SKirill A. Shutemov 
179166d2f4d2SHugh Dickins 		if (sgp == SGP_WRITE)
1792eb39d618SHugh Dickins 			__SetPageReferenced(page);
179366d2f4d2SHugh Dickins 
17949e18eb29SAndres Lagar-Cavilla 		error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1795800d8c63SKirill A. Shutemov 				PageTransHuge(page));
179654af6042SHugh Dickins 		if (error)
1797800d8c63SKirill A. Shutemov 			goto unacct;
1798800d8c63SKirill A. Shutemov 		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1799800d8c63SKirill A. Shutemov 				compound_order(page));
1800b065b432SHugh Dickins 		if (!error) {
1801800d8c63SKirill A. Shutemov 			error = shmem_add_to_page_cache(page, mapping, hindex,
1802fed400a1SWang Sheng-Hui 							NULL);
1803b065b432SHugh Dickins 			radix_tree_preload_end();
1804b065b432SHugh Dickins 		}
1805b065b432SHugh Dickins 		if (error) {
1806800d8c63SKirill A. Shutemov 			mem_cgroup_cancel_charge(page, memcg,
1807800d8c63SKirill A. Shutemov 					PageTransHuge(page));
1808800d8c63SKirill A. Shutemov 			goto unacct;
1809b065b432SHugh Dickins 		}
1810800d8c63SKirill A. Shutemov 		mem_cgroup_commit_charge(page, memcg, false,
1811800d8c63SKirill A. Shutemov 				PageTransHuge(page));
181254af6042SHugh Dickins 		lru_cache_add_anon(page);
181354af6042SHugh Dickins 
18144595ef88SKirill A. Shutemov 		spin_lock_irq(&info->lock);
1815800d8c63SKirill A. Shutemov 		info->alloced += 1 << compound_order(page);
1816800d8c63SKirill A. Shutemov 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
181754af6042SHugh Dickins 		shmem_recalc_inode(inode);
18184595ef88SKirill A. Shutemov 		spin_unlock_irq(&info->lock);
18191635f6a7SHugh Dickins 		alloced = true;
182054af6042SHugh Dickins 
1821779750d2SKirill A. Shutemov 		if (PageTransHuge(page) &&
1822779750d2SKirill A. Shutemov 		    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1823779750d2SKirill A. Shutemov 				hindex + HPAGE_PMD_NR - 1) {
1824779750d2SKirill A. Shutemov 			/*
1825779750d2SKirill A. Shutemov 			 * Part of the huge page is beyond i_size: subject
1826779750d2SKirill A. Shutemov 			 * to shrink under memory pressure.
1827779750d2SKirill A. Shutemov 			 */
1828779750d2SKirill A. Shutemov 			spin_lock(&sbinfo->shrinklist_lock);
1829d041353dSCong Wang 			/*
1830d041353dSCong Wang 			 * list_empty_careful() defends against unlocked
1831d041353dSCong Wang 			 * access to ->shrinklist in shmem_unused_huge_shrink()
1832d041353dSCong Wang 			 */
1833d041353dSCong Wang 			if (list_empty_careful(&info->shrinklist)) {
1834779750d2SKirill A. Shutemov 				list_add_tail(&info->shrinklist,
1835779750d2SKirill A. Shutemov 						&sbinfo->shrinklist);
1836779750d2SKirill A. Shutemov 				sbinfo->shrinklist_len++;
1837779750d2SKirill A. Shutemov 			}
1838779750d2SKirill A. Shutemov 			spin_unlock(&sbinfo->shrinklist_lock);
1839779750d2SKirill A. Shutemov 		}
1840779750d2SKirill A. Shutemov 
1841ec9516fbSHugh Dickins 		/*
18421635f6a7SHugh Dickins 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
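		 * (That is, skip the clear_highpage() pass below and leave the
		 * page !Uptodate; shmem_writepage() or a later read will
		 * initialize it, and shmem_fallocate() can still undo it.)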
18431635f6a7SHugh Dickins */ 18441635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 18451635f6a7SHugh Dickins sgp = SGP_WRITE; 18461635f6a7SHugh Dickins clear: 18471635f6a7SHugh Dickins /* 18481635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 18491635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 18501635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1851ec9516fbSHugh Dickins */ 1852800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1853800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 1854800d8c63SKirill A. Shutemov int i; 1855800d8c63SKirill A. Shutemov 1856800d8c63SKirill A. Shutemov for (i = 0; i < (1 << compound_order(head)); i++) { 1857800d8c63SKirill A. Shutemov clear_highpage(head + i); 1858800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 1859800d8c63SKirill A. Shutemov } 1860800d8c63SKirill A. Shutemov SetPageUptodate(head); 1861ec9516fbSHugh Dickins } 18621da177e4SLinus Torvalds } 1863bde05d1cSHugh Dickins 186454af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 186575edd345SHugh Dickins if (sgp <= SGP_CACHE && 186609cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1867267a4c76SHugh Dickins if (alloced) { 1868267a4c76SHugh Dickins ClearPageDirty(page); 1869267a4c76SHugh Dickins delete_from_page_cache(page); 18704595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1871267a4c76SHugh Dickins shmem_recalc_inode(inode); 18724595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1873267a4c76SHugh Dickins } 187454af6042SHugh Dickins error = -EINVAL; 1875267a4c76SHugh Dickins goto unlock; 1876ff36b801SShaohua Li } 1877800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 187854af6042SHugh Dickins return 0; 1879d00806b1SNick Piggin 1880d0217ac0SNick Piggin /* 188154af6042SHugh Dickins * Error recovery. 18821da177e4SLinus Torvalds */ 188354af6042SHugh Dickins unacct: 18840f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1 << compound_order(page)); 1885800d8c63SKirill A. Shutemov 1886800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 1887800d8c63SKirill A. Shutemov unlock_page(page); 1888800d8c63SKirill A. Shutemov put_page(page); 1889800d8c63SKirill A. Shutemov goto alloc_nohuge; 1890800d8c63SKirill A. Shutemov } 189154af6042SHugh Dickins failed: 1892267a4c76SHugh Dickins if (swap.val && !shmem_confirm_swap(mapping, index, swap)) 189354af6042SHugh Dickins error = -EEXIST; 1894d1899228SHugh Dickins unlock: 189527ab7006SHugh Dickins if (page) { 189654af6042SHugh Dickins unlock_page(page); 189709cbfeafSKirill A. Shutemov put_page(page); 189854af6042SHugh Dickins } 189954af6042SHugh Dickins if (error == -ENOSPC && !once++) { 19004595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 190154af6042SHugh Dickins shmem_recalc_inode(inode); 19024595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 19031da177e4SLinus Torvalds goto repeat; 1904d8dc74f2SAdrian Bunk } 1905d1899228SHugh Dickins if (error == -EEXIST) /* from above or from radix_tree_insert */ 190654af6042SHugh Dickins goto repeat; 190754af6042SHugh Dickins return error; 19081da177e4SLinus Torvalds } 19091da177e4SLinus Torvalds 191010d20bd2SLinus Torvalds /* 191110d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 191210d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 191310d20bd2SLinus Torvalds * target. 
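 * (Needed because the waitq head may live on the hole-punching task's
 * stack: see the shmem_falloc_waitq handshake in shmem_fault() below.)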
191410d20bd2SLinus Torvalds */ 1915ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 191610d20bd2SLinus Torvalds { 191710d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 19182055da97SIngo Molnar list_del_init(&wait->entry); 191910d20bd2SLinus Torvalds return ret; 192010d20bd2SLinus Torvalds } 192110d20bd2SLinus Torvalds 192211bac800SDave Jiang static int shmem_fault(struct vm_fault *vmf) 19231da177e4SLinus Torvalds { 192411bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 1925496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 19269e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 1927657e3038SKirill A. Shutemov enum sgp_type sgp; 19281da177e4SLinus Torvalds int error; 192968da9f05SHugh Dickins int ret = VM_FAULT_LOCKED; 19301da177e4SLinus Torvalds 1931f00cdc6dSHugh Dickins /* 1932f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 1933f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 1934f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 19358e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 19368e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 19378e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 19388e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 19398e205f77SHugh Dickins * 19408e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 19418e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 19428e205f77SHugh Dickins * we just need to make racing faults a rare case. 19438e205f77SHugh Dickins * 19448e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 19458e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 19468e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 
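	 * (The alternative would be a wait queue head in every
	 * shmem_inode_info, paid for by every tmpfs inode on the system.)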
1947f00cdc6dSHugh Dickins */ 1948f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 1949f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 1950f00cdc6dSHugh Dickins 1951f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 1952f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 19538e205f77SHugh Dickins if (shmem_falloc && 19548e205f77SHugh Dickins shmem_falloc->waitq && 19558e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 19568e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 19578e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 195810d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 19598e205f77SHugh Dickins 19608e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 1961f00cdc6dSHugh Dickins if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1962f00cdc6dSHugh Dickins !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 19638e205f77SHugh Dickins /* It's polite to up mmap_sem if we can */ 1964f00cdc6dSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 19658e205f77SHugh Dickins ret = VM_FAULT_RETRY; 1966f00cdc6dSHugh Dickins } 19678e205f77SHugh Dickins 19688e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 19698e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 19708e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 19718e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19728e205f77SHugh Dickins schedule(); 19738e205f77SHugh Dickins 19748e205f77SHugh Dickins /* 19758e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 19768e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 19778e205f77SHugh Dickins * is usually invalid by the time we reach here, but 19788e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 19798e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 19808e205f77SHugh Dickins */ 19818e205f77SHugh Dickins spin_lock(&inode->i_lock); 19828e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 19838e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19848e205f77SHugh Dickins return ret; 1985f00cdc6dSHugh Dickins } 19868e205f77SHugh Dickins spin_unlock(&inode->i_lock); 1987f00cdc6dSHugh Dickins } 1988f00cdc6dSHugh Dickins 1989657e3038SKirill A. Shutemov sgp = SGP_CACHE; 199018600332SMichal Hocko 199118600332SMichal Hocko if ((vma->vm_flags & VM_NOHUGEPAGE) || 199218600332SMichal Hocko test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 1993657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 199418600332SMichal Hocko else if (vma->vm_flags & VM_HUGEPAGE) 199518600332SMichal Hocko sgp = SGP_HUGE; 1996657e3038SKirill A. Shutemov 1997657e3038SKirill A. Shutemov error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 1998cfda0526SMike Rapoport gfp, vma, vmf, &ret); 19991da177e4SLinus Torvalds if (error) 20001da177e4SLinus Torvalds return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); 200168da9f05SHugh Dickins return ret; 20021da177e4SLinus Torvalds } 20031da177e4SLinus Torvalds 2004c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2005c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2006c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2007c01d5b30SHugh Dickins { 2008c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2009c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2010c01d5b30SHugh Dickins unsigned long addr; 2011c01d5b30SHugh Dickins unsigned long offset; 2012c01d5b30SHugh Dickins unsigned long inflated_len; 2013c01d5b30SHugh Dickins unsigned long inflated_addr; 2014c01d5b30SHugh Dickins unsigned long inflated_offset; 2015c01d5b30SHugh Dickins 2016c01d5b30SHugh Dickins if (len > TASK_SIZE) 2017c01d5b30SHugh Dickins return -ENOMEM; 2018c01d5b30SHugh Dickins 2019c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2020c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2021c01d5b30SHugh Dickins 2022e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2023c01d5b30SHugh Dickins return addr; 2024c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2025c01d5b30SHugh Dickins return addr; 2026c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2027c01d5b30SHugh Dickins return addr; 2028c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2029c01d5b30SHugh Dickins return addr; 2030c01d5b30SHugh Dickins 2031c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2032c01d5b30SHugh Dickins return addr; 2033c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2034c01d5b30SHugh Dickins return addr; 2035c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2036c01d5b30SHugh Dickins return addr; 2037c01d5b30SHugh Dickins /* 2038c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2039c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2040c01d5b30SHugh Dickins * But if caller specified an address hint, respect that as before. 2041c01d5b30SHugh Dickins */ 2042c01d5b30SHugh Dickins if (uaddr) 2043c01d5b30SHugh Dickins return addr; 2044c01d5b30SHugh Dickins 2045c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2046c01d5b30SHugh Dickins struct super_block *sb; 2047c01d5b30SHugh Dickins 2048c01d5b30SHugh Dickins if (file) { 2049c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2050c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2051c01d5b30SHugh Dickins } else { 2052c01d5b30SHugh Dickins /* 2053c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2054c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
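			 * (e.g. a userspace mmap(NULL, len, PROT_READ|PROT_WRITE,
			 * MAP_SHARED|MAP_ANONYMOUS, -1, 0) arrives here with
			 * file == NULL.)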
2055c01d5b30SHugh Dickins */ 2056c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2057c01d5b30SHugh Dickins return addr; 2058c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2059c01d5b30SHugh Dickins } 20603089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2061c01d5b30SHugh Dickins return addr; 2062c01d5b30SHugh Dickins } 2063c01d5b30SHugh Dickins 2064c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2065c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2066c01d5b30SHugh Dickins return addr; 2067c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2068c01d5b30SHugh Dickins return addr; 2069c01d5b30SHugh Dickins 2070c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2071c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2072c01d5b30SHugh Dickins return addr; 2073c01d5b30SHugh Dickins if (inflated_len < len) 2074c01d5b30SHugh Dickins return addr; 2075c01d5b30SHugh Dickins 2076c01d5b30SHugh Dickins inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2077c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2078c01d5b30SHugh Dickins return addr; 2079c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2080c01d5b30SHugh Dickins return addr; 2081c01d5b30SHugh Dickins 2082c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2083c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2084c01d5b30SHugh Dickins if (inflated_offset > offset) 2085c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2086c01d5b30SHugh Dickins 2087c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2088c01d5b30SHugh Dickins return addr; 2089c01d5b30SHugh Dickins return inflated_addr; 2090c01d5b30SHugh Dickins } 2091c01d5b30SHugh Dickins 20921da177e4SLinus Torvalds #ifdef CONFIG_NUMA 209341ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 20941da177e4SLinus Torvalds { 2095496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 209641ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 20971da177e4SLinus Torvalds } 20981da177e4SLinus Torvalds 2099d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2100d8dc74f2SAdrian Bunk unsigned long addr) 21011da177e4SLinus Torvalds { 2102496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 210341ffe5d5SHugh Dickins pgoff_t index; 21041da177e4SLinus Torvalds 210541ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 210641ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 21071da177e4SLinus Torvalds } 21081da177e4SLinus Torvalds #endif 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 21111da177e4SLinus Torvalds { 2112496ad9aaSAl Viro struct inode *inode = file_inode(file); 21131da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 21141da177e4SLinus Torvalds int retval = -ENOMEM; 21151da177e4SLinus Torvalds 21164595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 21171da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 21181da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 21191da177e4SLinus Torvalds goto out_nomem; 21201da177e4SLinus Torvalds info->flags |= VM_LOCKED; 212189e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 21241da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 21251da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 212689e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 21271da177e4SLinus Torvalds } 21281da177e4SLinus Torvalds retval = 0; 212989e004eaSLee Schermerhorn 21301da177e4SLinus Torvalds out_nomem: 21314595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 21321da177e4SLinus Torvalds return retval; 21331da177e4SLinus Torvalds } 21341da177e4SLinus Torvalds 21359b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 21361da177e4SLinus Torvalds { 21371da177e4SLinus Torvalds file_accessed(file); 21381da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2139e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2140f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2141f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2142f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2143f3f0e1d2SKirill A. Shutemov } 21441da177e4SLinus Torvalds return 0; 21451da177e4SLinus Torvalds } 21461da177e4SLinus Torvalds 2147454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 214809208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 21491da177e4SLinus Torvalds { 21501da177e4SLinus Torvalds struct inode *inode; 21511da177e4SLinus Torvalds struct shmem_inode_info *info; 21521da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 21531da177e4SLinus Torvalds 21545b04c689SPavel Emelyanov if (shmem_reserve_inode(sb)) 21551da177e4SLinus Torvalds return NULL; 21561da177e4SLinus Torvalds 21571da177e4SLinus Torvalds inode = new_inode(sb); 21581da177e4SLinus Torvalds if (inode) { 215985fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 2160454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 21611da177e4SLinus Torvalds inode->i_blocks = 0; 2162078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 216391828a40SDavid M. Grimes inode->i_generation = get_seconds(); 21641da177e4SLinus Torvalds info = SHMEM_I(inode); 21651da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 21661da177e4SLinus Torvalds spin_lock_init(&info->lock); 216740e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 21680b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2169779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 21701da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 217138f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 217272c04902SAl Viro cache_no_acl(inode); 21731da177e4SLinus Torvalds 21741da177e4SLinus Torvalds switch (mode & S_IFMT) { 21751da177e4SLinus Torvalds default: 217639f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 21771da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 21781da177e4SLinus Torvalds break; 21791da177e4SLinus Torvalds case S_IFREG: 218014fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 21811da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 21821da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 218371fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 218471fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 21851da177e4SLinus Torvalds break; 21861da177e4SLinus Torvalds case S_IFDIR: 2187d8c76e6fSDave Hansen inc_nlink(inode); 21881da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 21891da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 21901da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 21911da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 21921da177e4SLinus Torvalds break; 21931da177e4SLinus Torvalds case S_IFLNK: 21941da177e4SLinus Torvalds /* 21951da177e4SLinus Torvalds * Must not load anything in the rbtree, 21961da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 21971da177e4SLinus Torvalds */ 219871fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 21991da177e4SLinus Torvalds break; 22001da177e4SLinus Torvalds } 22015b04c689SPavel Emelyanov } else 22025b04c689SPavel Emelyanov shmem_free_inode(sb); 22031da177e4SLinus Torvalds return inode; 22041da177e4SLinus Torvalds } 22051da177e4SLinus Torvalds 22060cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping) 22070cd6144aSJohannes Weiner { 2208f8005451SHugh Dickins return mapping->a_ops == &shmem_aops; 22090cd6144aSJohannes Weiner } 22100cd6144aSJohannes Weiner 22118d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 22124c27fe4cSMike Rapoport pmd_t *dst_pmd, 22134c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 22144c27fe4cSMike Rapoport unsigned long dst_addr, 22154c27fe4cSMike Rapoport unsigned long src_addr, 22168d103963SMike Rapoport bool zeropage, 22174c27fe4cSMike Rapoport struct page **pagep) 22184c27fe4cSMike Rapoport { 22194c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 22204c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 22214c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 22224c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 22234c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 22244c27fe4cSMike Rapoport struct mem_cgroup *memcg; 22254c27fe4cSMike Rapoport spinlock_t *ptl; 22264c27fe4cSMike Rapoport void *page_kaddr; 22274c27fe4cSMike Rapoport struct page *page; 22284c27fe4cSMike Rapoport pte_t _dst_pte, *dst_pte; 22294c27fe4cSMike Rapoport int ret; 22304c27fe4cSMike Rapoport 22314c27fe4cSMike Rapoport ret = -ENOMEM; 22320f079694SMike Rapoport if (!shmem_inode_acct_block(inode, 1)) 22334c27fe4cSMike Rapoport goto out; 22344c27fe4cSMike Rapoport 2235cb658a45SAndrea Arcangeli if (!*pagep) { 22364c27fe4cSMike Rapoport page = shmem_alloc_page(gfp, info, pgoff); 22374c27fe4cSMike Rapoport if (!page) 
22380f079694SMike Rapoport goto out_unacct_blocks; 22394c27fe4cSMike Rapoport 22408d103963SMike Rapoport if (!zeropage) { /* mcopy_atomic */ 22414c27fe4cSMike Rapoport page_kaddr = kmap_atomic(page); 22428d103963SMike Rapoport ret = copy_from_user(page_kaddr, 22438d103963SMike Rapoport (const void __user *)src_addr, 22444c27fe4cSMike Rapoport PAGE_SIZE); 22454c27fe4cSMike Rapoport kunmap_atomic(page_kaddr); 22464c27fe4cSMike Rapoport 22474c27fe4cSMike Rapoport /* fallback to copy_from_user outside mmap_sem */ 22484c27fe4cSMike Rapoport if (unlikely(ret)) { 22494c27fe4cSMike Rapoport *pagep = page; 22500f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 22514c27fe4cSMike Rapoport /* don't free the page */ 22524c27fe4cSMike Rapoport return -EFAULT; 22534c27fe4cSMike Rapoport } 22548d103963SMike Rapoport } else { /* mfill_zeropage_atomic */ 22558d103963SMike Rapoport clear_highpage(page); 22568d103963SMike Rapoport } 22574c27fe4cSMike Rapoport } else { 22584c27fe4cSMike Rapoport page = *pagep; 22594c27fe4cSMike Rapoport *pagep = NULL; 22604c27fe4cSMike Rapoport } 22614c27fe4cSMike Rapoport 22629cc90c66SAndrea Arcangeli VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 22639cc90c66SAndrea Arcangeli __SetPageLocked(page); 22649cc90c66SAndrea Arcangeli __SetPageSwapBacked(page); 2265a425d358SAndrea Arcangeli __SetPageUptodate(page); 22669cc90c66SAndrea Arcangeli 22674c27fe4cSMike Rapoport ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false); 22684c27fe4cSMike Rapoport if (ret) 22694c27fe4cSMike Rapoport goto out_release; 22704c27fe4cSMike Rapoport 22714c27fe4cSMike Rapoport ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); 22724c27fe4cSMike Rapoport if (!ret) { 22734c27fe4cSMike Rapoport ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL); 22744c27fe4cSMike Rapoport radix_tree_preload_end(); 22754c27fe4cSMike Rapoport } 22764c27fe4cSMike Rapoport if (ret) 22774c27fe4cSMike Rapoport goto out_release_uncharge; 22784c27fe4cSMike Rapoport 22794c27fe4cSMike Rapoport mem_cgroup_commit_charge(page, memcg, false, false); 22804c27fe4cSMike Rapoport 22814c27fe4cSMike Rapoport _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 22824c27fe4cSMike Rapoport if (dst_vma->vm_flags & VM_WRITE) 22834c27fe4cSMike Rapoport _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 22844c27fe4cSMike Rapoport 22854c27fe4cSMike Rapoport ret = -EEXIST; 22864c27fe4cSMike Rapoport dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 22874c27fe4cSMike Rapoport if (!pte_none(*dst_pte)) 22884c27fe4cSMike Rapoport goto out_release_uncharge_unlock; 22894c27fe4cSMike Rapoport 22904c27fe4cSMike Rapoport lru_cache_add_anon(page); 22914c27fe4cSMike Rapoport 22924c27fe4cSMike Rapoport spin_lock(&info->lock); 22934c27fe4cSMike Rapoport info->alloced++; 22944c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 22954c27fe4cSMike Rapoport shmem_recalc_inode(inode); 22964c27fe4cSMike Rapoport spin_unlock(&info->lock); 22974c27fe4cSMike Rapoport 22984c27fe4cSMike Rapoport inc_mm_counter(dst_mm, mm_counter_file(page)); 22994c27fe4cSMike Rapoport page_add_file_rmap(page, false); 23004c27fe4cSMike Rapoport set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 23014c27fe4cSMike Rapoport 23024c27fe4cSMike Rapoport /* No need to invalidate - it was non-present before */ 23034c27fe4cSMike Rapoport update_mmu_cache(dst_vma, dst_addr, dst_pte); 23044c27fe4cSMike Rapoport unlock_page(page); 23054c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 23064c27fe4cSMike Rapoport ret = 0; 23074c27fe4cSMike Rapoport out: 
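	/*
	 * Common exit: ret is 0 only when the pte was installed.  The
	 * error labels below undo partial setup in reverse order of
	 * setup -- pte lock, memcg charge, page lock and reference,
	 * accounted block -- and then jump back here.
	 */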
23084c27fe4cSMike Rapoport return ret; 23094c27fe4cSMike Rapoport out_release_uncharge_unlock: 23104c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 23114c27fe4cSMike Rapoport out_release_uncharge: 23124c27fe4cSMike Rapoport mem_cgroup_cancel_charge(page, memcg, false); 23134c27fe4cSMike Rapoport out_release: 23149cc90c66SAndrea Arcangeli unlock_page(page); 23154c27fe4cSMike Rapoport put_page(page); 23164c27fe4cSMike Rapoport out_unacct_blocks: 23170f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 23184c27fe4cSMike Rapoport goto out; 23194c27fe4cSMike Rapoport } 23204c27fe4cSMike Rapoport 23218d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 23228d103963SMike Rapoport pmd_t *dst_pmd, 23238d103963SMike Rapoport struct vm_area_struct *dst_vma, 23248d103963SMike Rapoport unsigned long dst_addr, 23258d103963SMike Rapoport unsigned long src_addr, 23268d103963SMike Rapoport struct page **pagep) 23278d103963SMike Rapoport { 23288d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 23298d103963SMike Rapoport dst_addr, src_addr, false, pagep); 23308d103963SMike Rapoport } 23318d103963SMike Rapoport 23328d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, 23338d103963SMike Rapoport pmd_t *dst_pmd, 23348d103963SMike Rapoport struct vm_area_struct *dst_vma, 23358d103963SMike Rapoport unsigned long dst_addr) 23368d103963SMike Rapoport { 23378d103963SMike Rapoport struct page *page = NULL; 23388d103963SMike Rapoport 23398d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 23408d103963SMike Rapoport dst_addr, 0, true, &page); 23418d103963SMike Rapoport } 23428d103963SMike Rapoport 23431da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 234492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 234569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 23461da177e4SLinus Torvalds 23476d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 23486d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 23496d9d88d0SJarkko Sakkinen #else 23506d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 23516d9d88d0SJarkko Sakkinen #endif 23526d9d88d0SJarkko Sakkinen 23531da177e4SLinus Torvalds static int 2354800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2355800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2356800d15a5SNick Piggin struct page **pagep, void **fsdata) 23571da177e4SLinus Torvalds { 2358800d15a5SNick Piggin struct inode *inode = mapping->host; 235940e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 236009cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 236140e041a2SDavid Herrmann 236240e041a2SDavid Herrmann /* i_mutex is held by caller */ 23633f472cc9SSteven Rostedt (VMware) if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) { 236440e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) 236540e041a2SDavid Herrmann return -EPERM; 236640e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 236740e041a2SDavid Herrmann return -EPERM; 236840e041a2SDavid Herrmann } 236940e041a2SDavid Herrmann 23709e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2371800d15a5SNick Piggin } 2372800d15a5SNick Piggin 2373800d15a5SNick Piggin static int 2374800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2375800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2376800d15a5SNick Piggin struct page *page, void *fsdata) 2377800d15a5SNick Piggin { 2378800d15a5SNick Piggin struct inode *inode = mapping->host; 2379800d15a5SNick Piggin 2380800d15a5SNick Piggin if (pos + copied > inode->i_size) 2381800d15a5SNick Piggin i_size_write(inode, pos + copied); 2382800d15a5SNick Piggin 2383ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2384800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2385800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2386800d8c63SKirill A. Shutemov int i; 2387800d8c63SKirill A. Shutemov 2388800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2389800d8c63SKirill A. Shutemov if (head + i == page) 2390800d8c63SKirill A. Shutemov continue; 2391800d8c63SKirill A. Shutemov clear_highpage(head + i); 2392800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2393800d8c63SKirill A. Shutemov } 2394800d8c63SKirill A. Shutemov } 239509cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 239609cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2397ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 239809cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2399ec9516fbSHugh Dickins } 2400800d8c63SKirill A. Shutemov SetPageUptodate(head); 2401ec9516fbSHugh Dickins } 2402d3602444SHugh Dickins set_page_dirty(page); 24036746aff7SWu Fengguang unlock_page(page); 240409cbfeafSKirill A. Shutemov put_page(page); 2405d3602444SHugh Dickins 2406800d15a5SNick Piggin return copied; 24071da177e4SLinus Torvalds } 24081da177e4SLinus Torvalds 24092ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 24101da177e4SLinus Torvalds { 24116e58e79dSAl Viro struct file *file = iocb->ki_filp; 24126e58e79dSAl Viro struct inode *inode = file_inode(file); 24131da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 241441ffe5d5SHugh Dickins pgoff_t index; 241541ffe5d5SHugh Dickins unsigned long offset; 2416a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2417f7c1d074SGeert Uytterhoeven int error = 0; 2418cb66a7a1SAl Viro ssize_t retval = 0; 24196e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2420a0ee5ec5SHugh Dickins 2421a0ee5ec5SHugh Dickins /* 2422a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2423a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2424a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 2425a0ee5ec5SHugh Dickins */ 2426777eda2cSAl Viro if (!iter_is_iovec(to)) 242775edd345SHugh Dickins sgp = SGP_CACHE; 24281da177e4SLinus Torvalds 242909cbfeafSKirill A. 
Shutemov index = *ppos >> PAGE_SHIFT; 243009cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 24311da177e4SLinus Torvalds 24321da177e4SLinus Torvalds for (;;) { 24331da177e4SLinus Torvalds struct page *page = NULL; 243441ffe5d5SHugh Dickins pgoff_t end_index; 243541ffe5d5SHugh Dickins unsigned long nr, ret; 24361da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 24371da177e4SLinus Torvalds 243809cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 24391da177e4SLinus Torvalds if (index > end_index) 24401da177e4SLinus Torvalds break; 24411da177e4SLinus Torvalds if (index == end_index) { 244209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 24431da177e4SLinus Torvalds if (nr <= offset) 24441da177e4SLinus Torvalds break; 24451da177e4SLinus Torvalds } 24461da177e4SLinus Torvalds 24479e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 24486e58e79dSAl Viro if (error) { 24496e58e79dSAl Viro if (error == -EINVAL) 24506e58e79dSAl Viro error = 0; 24511da177e4SLinus Torvalds break; 24521da177e4SLinus Torvalds } 245375edd345SHugh Dickins if (page) { 245475edd345SHugh Dickins if (sgp == SGP_CACHE) 245575edd345SHugh Dickins set_page_dirty(page); 2456d3602444SHugh Dickins unlock_page(page); 245775edd345SHugh Dickins } 24581da177e4SLinus Torvalds 24591da177e4SLinus Torvalds /* 24601da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 24611b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 24621da177e4SLinus Torvalds */ 246309cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 24641da177e4SLinus Torvalds i_size = i_size_read(inode); 246509cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 24661da177e4SLinus Torvalds if (index == end_index) { 246709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 24681da177e4SLinus Torvalds if (nr <= offset) { 24691da177e4SLinus Torvalds if (page) 247009cbfeafSKirill A. Shutemov put_page(page); 24711da177e4SLinus Torvalds break; 24721da177e4SLinus Torvalds } 24731da177e4SLinus Torvalds } 24741da177e4SLinus Torvalds nr -= offset; 24751da177e4SLinus Torvalds 24761da177e4SLinus Torvalds if (page) { 24771da177e4SLinus Torvalds /* 24781da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 24791da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 24801da177e4SLinus Torvalds * before reading the page on the kernel side. 24811da177e4SLinus Torvalds */ 24821da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 24831da177e4SLinus Torvalds flush_dcache_page(page); 24841da177e4SLinus Torvalds /* 24851da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 24861da177e4SLinus Torvalds */ 24871da177e4SLinus Torvalds if (!offset) 24881da177e4SLinus Torvalds mark_page_accessed(page); 2489b5810039SNick Piggin } else { 24901da177e4SLinus Torvalds page = ZERO_PAGE(0); 249109cbfeafSKirill A. Shutemov get_page(page); 2492b5810039SNick Piggin } 24931da177e4SLinus Torvalds 24941da177e4SLinus Torvalds /* 24951da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 24961da177e4SLinus Torvalds * now we can copy it to user space... 24971da177e4SLinus Torvalds */ 24982ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 24996e58e79dSAl Viro retval += ret; 25001da177e4SLinus Torvalds offset += ret; 250109cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 250209cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 25031da177e4SLinus Torvalds 250409cbfeafSKirill A. 
Shutemov put_page(page); 25052ba5bbedSAl Viro if (!iov_iter_count(to)) 25061da177e4SLinus Torvalds break; 25076e58e79dSAl Viro if (ret < nr) { 25086e58e79dSAl Viro error = -EFAULT; 25096e58e79dSAl Viro break; 25106e58e79dSAl Viro } 25111da177e4SLinus Torvalds cond_resched(); 25121da177e4SLinus Torvalds } 25131da177e4SLinus Torvalds 251409cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 25156e58e79dSAl Viro file_accessed(file); 25166e58e79dSAl Viro return retval ? retval : error; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 2519220f2ac9SHugh Dickins /* 2520220f2ac9SHugh Dickins * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. 2521220f2ac9SHugh Dickins */ 2522220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2523965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2524220f2ac9SHugh Dickins { 2525220f2ac9SHugh Dickins struct page *page; 2526220f2ac9SHugh Dickins struct pagevec pvec; 2527220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2528220f2ac9SHugh Dickins bool done = false; 2529220f2ac9SHugh Dickins int i; 2530220f2ac9SHugh Dickins 2531220f2ac9SHugh Dickins pagevec_init(&pvec, 0); 2532220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2533220f2ac9SHugh Dickins while (!done) { 25340cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2535220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2536220f2ac9SHugh Dickins if (!pvec.nr) { 2537965c8e59SAndrew Morton if (whence == SEEK_DATA) 2538220f2ac9SHugh Dickins index = end; 2539220f2ac9SHugh Dickins break; 2540220f2ac9SHugh Dickins } 2541220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2542220f2ac9SHugh Dickins if (index < indices[i]) { 2543965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2544220f2ac9SHugh Dickins done = true; 2545220f2ac9SHugh Dickins break; 2546220f2ac9SHugh Dickins } 2547220f2ac9SHugh Dickins index = indices[i]; 2548220f2ac9SHugh Dickins } 2549220f2ac9SHugh Dickins page = pvec.pages[i]; 2550220f2ac9SHugh Dickins if (page && !radix_tree_exceptional_entry(page)) { 2551220f2ac9SHugh Dickins if (!PageUptodate(page)) 2552220f2ac9SHugh Dickins page = NULL; 2553220f2ac9SHugh Dickins } 2554220f2ac9SHugh Dickins if (index >= end || 2555965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2556965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2557220f2ac9SHugh Dickins done = true; 2558220f2ac9SHugh Dickins break; 2559220f2ac9SHugh Dickins } 2560220f2ac9SHugh Dickins } 25610cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2562220f2ac9SHugh Dickins pagevec_release(&pvec); 2563220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2564220f2ac9SHugh Dickins cond_resched(); 2565220f2ac9SHugh Dickins } 2566220f2ac9SHugh Dickins return index; 2567220f2ac9SHugh Dickins } 2568220f2ac9SHugh Dickins 2569965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2570220f2ac9SHugh Dickins { 2571220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2572220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2573220f2ac9SHugh Dickins pgoff_t start, end; 2574220f2ac9SHugh Dickins loff_t new_offset; 2575220f2ac9SHugh Dickins 2576965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2577965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2578220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 25795955102cSAl Viro inode_lock(inode); 2580220f2ac9SHugh Dickins /* We're holding 
i_mutex so we can access i_size directly */ 2581220f2ac9SHugh Dickins 2582220f2ac9SHugh Dickins if (offset < 0) 2583220f2ac9SHugh Dickins offset = -EINVAL; 2584220f2ac9SHugh Dickins else if (offset >= inode->i_size) 2585220f2ac9SHugh Dickins offset = -ENXIO; 2586220f2ac9SHugh Dickins else { 258709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 258809cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2589965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 259009cbfeafSKirill A. Shutemov new_offset <<= PAGE_SHIFT; 2591220f2ac9SHugh Dickins if (new_offset > offset) { 2592220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2593220f2ac9SHugh Dickins offset = new_offset; 2594965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2595220f2ac9SHugh Dickins offset = -ENXIO; 2596220f2ac9SHugh Dickins else 2597220f2ac9SHugh Dickins offset = inode->i_size; 2598220f2ac9SHugh Dickins } 2599220f2ac9SHugh Dickins } 2600220f2ac9SHugh Dickins 2601387aae6fSHugh Dickins if (offset >= 0) 260246a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 26035955102cSAl Viro inode_unlock(inode); 2604220f2ac9SHugh Dickins return offset; 2605220f2ac9SHugh Dickins } 2606220f2ac9SHugh Dickins 260705f65b5cSDavid Herrmann /* 260805f65b5cSDavid Herrmann * We need a tag: a new tag would expand every radix_tree_node by 8 bytes, 260905f65b5cSDavid Herrmann * so reuse a tag which we firmly believe is never set or cleared on shmem. 261005f65b5cSDavid Herrmann */ 261105f65b5cSDavid Herrmann #define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE 261205f65b5cSDavid Herrmann #define LAST_SCAN 4 /* about 150ms max */ 261305f65b5cSDavid Herrmann 261405f65b5cSDavid Herrmann static void shmem_tag_pins(struct address_space *mapping) 261505f65b5cSDavid Herrmann { 261605f65b5cSDavid Herrmann struct radix_tree_iter iter; 261705f65b5cSDavid Herrmann void **slot; 261805f65b5cSDavid Herrmann pgoff_t start; 261905f65b5cSDavid Herrmann struct page *page; 262005f65b5cSDavid Herrmann 262105f65b5cSDavid Herrmann lru_add_drain(); 262205f65b5cSDavid Herrmann start = 0; 262305f65b5cSDavid Herrmann rcu_read_lock(); 262405f65b5cSDavid Herrmann 262505f65b5cSDavid Herrmann radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 262605f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 262705f65b5cSDavid Herrmann if (!page || radix_tree_exception(page)) { 26282cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 26292cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 26302cf938aaSMatthew Wilcox continue; 26312cf938aaSMatthew Wilcox } 263205f65b5cSDavid Herrmann } else if (page_count(page) - page_mapcount(page) > 1) { 263305f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 263405f65b5cSDavid Herrmann radix_tree_tag_set(&mapping->page_tree, iter.index, 263505f65b5cSDavid Herrmann SHMEM_TAG_PINNED); 263605f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 263705f65b5cSDavid Herrmann } 263805f65b5cSDavid Herrmann 263905f65b5cSDavid Herrmann if (need_resched()) { 2640148deab2SMatthew Wilcox slot = radix_tree_iter_resume(slot, &iter); 264105f65b5cSDavid Herrmann cond_resched_rcu(); 264205f65b5cSDavid Herrmann } 264305f65b5cSDavid Herrmann } 264405f65b5cSDavid Herrmann rcu_read_unlock(); 264505f65b5cSDavid Herrmann } 264605f65b5cSDavid Herrmann 264705f65b5cSDavid Herrmann /* 264805f65b5cSDavid Herrmann * Setting SEAL_WRITE requires us to verify there's no pending writer. 
However, 264905f65b5cSDavid Herrmann * via get_user_pages(), drivers might have some pending I/O without any active 265005f65b5cSDavid Herrmann * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages 265105f65b5cSDavid Herrmann * and see whether it has an elevated ref-count. If so, we tag them and wait for 265205f65b5cSDavid Herrmann * them to be dropped. 265305f65b5cSDavid Herrmann * The caller must guarantee that no new user will acquire writable references 265405f65b5cSDavid Herrmann * to those pages to avoid races. 265505f65b5cSDavid Herrmann */ 265640e041a2SDavid Herrmann static int shmem_wait_for_pins(struct address_space *mapping) 265740e041a2SDavid Herrmann { 265805f65b5cSDavid Herrmann struct radix_tree_iter iter; 265905f65b5cSDavid Herrmann void **slot; 266005f65b5cSDavid Herrmann pgoff_t start; 266105f65b5cSDavid Herrmann struct page *page; 266205f65b5cSDavid Herrmann int error, scan; 266305f65b5cSDavid Herrmann 266405f65b5cSDavid Herrmann shmem_tag_pins(mapping); 266505f65b5cSDavid Herrmann 266605f65b5cSDavid Herrmann error = 0; 266705f65b5cSDavid Herrmann for (scan = 0; scan <= LAST_SCAN; scan++) { 266805f65b5cSDavid Herrmann if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) 266905f65b5cSDavid Herrmann break; 267005f65b5cSDavid Herrmann 267105f65b5cSDavid Herrmann if (!scan) 267205f65b5cSDavid Herrmann lru_add_drain_all(); 267305f65b5cSDavid Herrmann else if (schedule_timeout_killable((HZ << scan) / 200)) 267405f65b5cSDavid Herrmann scan = LAST_SCAN; 267505f65b5cSDavid Herrmann 267605f65b5cSDavid Herrmann start = 0; 267705f65b5cSDavid Herrmann rcu_read_lock(); 267805f65b5cSDavid Herrmann radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 267905f65b5cSDavid Herrmann start, SHMEM_TAG_PINNED) { 268005f65b5cSDavid Herrmann 268105f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 268205f65b5cSDavid Herrmann if (radix_tree_exception(page)) { 26832cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 26842cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 26852cf938aaSMatthew Wilcox continue; 26862cf938aaSMatthew Wilcox } 268705f65b5cSDavid Herrmann 268805f65b5cSDavid Herrmann page = NULL; 268905f65b5cSDavid Herrmann } 269005f65b5cSDavid Herrmann 269105f65b5cSDavid Herrmann if (page && 269205f65b5cSDavid Herrmann page_count(page) - page_mapcount(page) != 1) { 269305f65b5cSDavid Herrmann if (scan < LAST_SCAN) 269405f65b5cSDavid Herrmann goto continue_resched; 269505f65b5cSDavid Herrmann 269605f65b5cSDavid Herrmann /* 269705f65b5cSDavid Herrmann * On the last scan, we clean up all those tags 269805f65b5cSDavid Herrmann * we inserted; but make a note that we still 269905f65b5cSDavid Herrmann * found pages pinned. 
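				 * The resulting -EBUSY propagates out of
				 * shmem_wait_for_pins() to shmem_add_seals(),
				 * so an F_ADD_SEALS of F_SEAL_WRITE fails
				 * with EBUSY rather than sealing a file that
				 * still has writable pins.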
270005f65b5cSDavid Herrmann */ 270105f65b5cSDavid Herrmann error = -EBUSY; 270205f65b5cSDavid Herrmann } 270305f65b5cSDavid Herrmann 270405f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 270505f65b5cSDavid Herrmann radix_tree_tag_clear(&mapping->page_tree, 270605f65b5cSDavid Herrmann iter.index, SHMEM_TAG_PINNED); 270705f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 270805f65b5cSDavid Herrmann continue_resched: 270905f65b5cSDavid Herrmann if (need_resched()) { 2710148deab2SMatthew Wilcox slot = radix_tree_iter_resume(slot, &iter); 271105f65b5cSDavid Herrmann cond_resched_rcu(); 271205f65b5cSDavid Herrmann } 271305f65b5cSDavid Herrmann } 271405f65b5cSDavid Herrmann rcu_read_unlock(); 271505f65b5cSDavid Herrmann } 271605f65b5cSDavid Herrmann 271705f65b5cSDavid Herrmann return error; 271840e041a2SDavid Herrmann } 271940e041a2SDavid Herrmann 272040e041a2SDavid Herrmann #define F_ALL_SEALS (F_SEAL_SEAL | \ 272140e041a2SDavid Herrmann F_SEAL_SHRINK | \ 272240e041a2SDavid Herrmann F_SEAL_GROW | \ 272340e041a2SDavid Herrmann F_SEAL_WRITE) 272440e041a2SDavid Herrmann 272540e041a2SDavid Herrmann int shmem_add_seals(struct file *file, unsigned int seals) 272640e041a2SDavid Herrmann { 272740e041a2SDavid Herrmann struct inode *inode = file_inode(file); 272840e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 272940e041a2SDavid Herrmann int error; 273040e041a2SDavid Herrmann 273140e041a2SDavid Herrmann /* 273240e041a2SDavid Herrmann * SEALING 273340e041a2SDavid Herrmann * Sealing allows multiple parties to share a shmem-file but restrict 273440e041a2SDavid Herrmann * access to a specific subset of file operations. Seals can only be 273540e041a2SDavid Herrmann * added, but never removed. This way, mutually untrusted parties can 273640e041a2SDavid Herrmann * share common memory regions with a well-defined policy. A malicious 273740e041a2SDavid Herrmann * peer can thus never perform unwanted operations on a shared object. 273840e041a2SDavid Herrmann * 273940e041a2SDavid Herrmann * Seals are only supported on special shmem-files and always affect 274040e041a2SDavid Herrmann * the whole underlying inode. Once a seal is set, it may prevent some 274140e041a2SDavid Herrmann * kinds of access to the file. Currently, the following seals are 274240e041a2SDavid Herrmann * defined: 274340e041a2SDavid Herrmann * SEAL_SEAL: Prevent further seals from being set on this file 274440e041a2SDavid Herrmann * SEAL_SHRINK: Prevent the file from shrinking 274540e041a2SDavid Herrmann * SEAL_GROW: Prevent the file from growing 274640e041a2SDavid Herrmann * SEAL_WRITE: Prevent write access to the file 274740e041a2SDavid Herrmann * 274840e041a2SDavid Herrmann * As we don't require any trust relationship between two parties, we 274940e041a2SDavid Herrmann * must prevent seals from being removed. Therefore, sealing a file 275040e041a2SDavid Herrmann * only adds a given set of seals to the file, it never touches 275140e041a2SDavid Herrmann * existing seals. Furthermore, the "setting seals"-operation can be 275240e041a2SDavid Herrmann * sealed itself, which basically prevents any further seal from being 275340e041a2SDavid Herrmann * added. 275440e041a2SDavid Herrmann * 275540e041a2SDavid Herrmann * Semantics of sealing are only defined on volatile files. Only 275640e041a2SDavid Herrmann * anonymous shmem files support sealing. More importantly, seals are 275740e041a2SDavid Herrmann * never written to disk. 
Therefore, there's no plan to support it on 275840e041a2SDavid Herrmann * other file types. 275940e041a2SDavid Herrmann */ 276040e041a2SDavid Herrmann 276140e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 276240e041a2SDavid Herrmann return -EINVAL; 276340e041a2SDavid Herrmann if (!(file->f_mode & FMODE_WRITE)) 276440e041a2SDavid Herrmann return -EPERM; 276540e041a2SDavid Herrmann if (seals & ~(unsigned int)F_ALL_SEALS) 276640e041a2SDavid Herrmann return -EINVAL; 276740e041a2SDavid Herrmann 27685955102cSAl Viro inode_lock(inode); 276940e041a2SDavid Herrmann 277040e041a2SDavid Herrmann if (info->seals & F_SEAL_SEAL) { 277140e041a2SDavid Herrmann error = -EPERM; 277240e041a2SDavid Herrmann goto unlock; 277340e041a2SDavid Herrmann } 277440e041a2SDavid Herrmann 277540e041a2SDavid Herrmann if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) { 277640e041a2SDavid Herrmann error = mapping_deny_writable(file->f_mapping); 277740e041a2SDavid Herrmann if (error) 277840e041a2SDavid Herrmann goto unlock; 277940e041a2SDavid Herrmann 278040e041a2SDavid Herrmann error = shmem_wait_for_pins(file->f_mapping); 278140e041a2SDavid Herrmann if (error) { 278240e041a2SDavid Herrmann mapping_allow_writable(file->f_mapping); 278340e041a2SDavid Herrmann goto unlock; 278440e041a2SDavid Herrmann } 278540e041a2SDavid Herrmann } 278640e041a2SDavid Herrmann 278740e041a2SDavid Herrmann info->seals |= seals; 278840e041a2SDavid Herrmann error = 0; 278940e041a2SDavid Herrmann 279040e041a2SDavid Herrmann unlock: 27915955102cSAl Viro inode_unlock(inode); 279240e041a2SDavid Herrmann return error; 279340e041a2SDavid Herrmann } 279440e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_add_seals); 279540e041a2SDavid Herrmann 279640e041a2SDavid Herrmann int shmem_get_seals(struct file *file) 279740e041a2SDavid Herrmann { 279840e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 279940e041a2SDavid Herrmann return -EINVAL; 280040e041a2SDavid Herrmann 280140e041a2SDavid Herrmann return SHMEM_I(file_inode(file))->seals; 280240e041a2SDavid Herrmann } 280340e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_get_seals); 280440e041a2SDavid Herrmann 280540e041a2SDavid Herrmann long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 280640e041a2SDavid Herrmann { 280740e041a2SDavid Herrmann long error; 280840e041a2SDavid Herrmann 280940e041a2SDavid Herrmann switch (cmd) { 281040e041a2SDavid Herrmann case F_ADD_SEALS: 281140e041a2SDavid Herrmann /* disallow upper 32bit */ 281240e041a2SDavid Herrmann if (arg > UINT_MAX) 281340e041a2SDavid Herrmann return -EINVAL; 281440e041a2SDavid Herrmann 281540e041a2SDavid Herrmann error = shmem_add_seals(file, arg); 281640e041a2SDavid Herrmann break; 281740e041a2SDavid Herrmann case F_GET_SEALS: 281840e041a2SDavid Herrmann error = shmem_get_seals(file); 281940e041a2SDavid Herrmann break; 282040e041a2SDavid Herrmann default: 282140e041a2SDavid Herrmann error = -EINVAL; 282240e041a2SDavid Herrmann break; 282340e041a2SDavid Herrmann } 282440e041a2SDavid Herrmann 282540e041a2SDavid Herrmann return error; 282640e041a2SDavid Herrmann } 282740e041a2SDavid Herrmann 282883e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 282983e4fa9cSHugh Dickins loff_t len) 283083e4fa9cSHugh Dickins { 2831496ad9aaSAl Viro struct inode *inode = file_inode(file); 2832e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 283340e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 28341aac1400SHugh Dickins struct 
shmem_falloc shmem_falloc; 2835e2d12e22SHugh Dickins pgoff_t start, index, end; 2836e2d12e22SHugh Dickins int error; 283783e4fa9cSHugh Dickins 283813ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 283913ace4d0SHugh Dickins return -EOPNOTSUPP; 284013ace4d0SHugh Dickins 28415955102cSAl Viro inode_lock(inode); 284283e4fa9cSHugh Dickins 284383e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 284483e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 284583e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 284683e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 28478e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 284883e4fa9cSHugh Dickins 284940e041a2SDavid Herrmann /* protected by i_mutex */ 285040e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) { 285140e041a2SDavid Herrmann error = -EPERM; 285240e041a2SDavid Herrmann goto out; 285340e041a2SDavid Herrmann } 285440e041a2SDavid Herrmann 28558e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2856f00cdc6dSHugh Dickins shmem_falloc.start = unmap_start >> PAGE_SHIFT; 2857f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2858f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2859f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2860f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2861f00cdc6dSHugh Dickins 286283e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 286383e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 286483e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 286583e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 286683e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 28678e205f77SHugh Dickins 28688e205f77SHugh Dickins spin_lock(&inode->i_lock); 28698e205f77SHugh Dickins inode->i_private = NULL; 28708e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 28712055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 28728e205f77SHugh Dickins spin_unlock(&inode->i_lock); 287383e4fa9cSHugh Dickins error = 0; 28748e205f77SHugh Dickins goto out; 287583e4fa9cSHugh Dickins } 287683e4fa9cSHugh Dickins 2877e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2878e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2879e2d12e22SHugh Dickins if (error) 2880e2d12e22SHugh Dickins goto out; 2881e2d12e22SHugh Dickins 288240e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 288340e041a2SDavid Herrmann error = -EPERM; 288440e041a2SDavid Herrmann goto out; 288540e041a2SDavid Herrmann } 288640e041a2SDavid Herrmann 288709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 288809cbfeafSKirill A. 
Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2889e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2890e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2891e2d12e22SHugh Dickins error = -ENOSPC; 2892e2d12e22SHugh Dickins goto out; 2893e2d12e22SHugh Dickins } 2894e2d12e22SHugh Dickins 28958e205f77SHugh Dickins shmem_falloc.waitq = NULL; 28961aac1400SHugh Dickins shmem_falloc.start = start; 28971aac1400SHugh Dickins shmem_falloc.next = start; 28981aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 28991aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 29001aac1400SHugh Dickins spin_lock(&inode->i_lock); 29011aac1400SHugh Dickins inode->i_private = &shmem_falloc; 29021aac1400SHugh Dickins spin_unlock(&inode->i_lock); 29031aac1400SHugh Dickins 2904e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2905e2d12e22SHugh Dickins struct page *page; 2906e2d12e22SHugh Dickins 2907e2d12e22SHugh Dickins /* 2908e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2909e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2910e2d12e22SHugh Dickins */ 2911e2d12e22SHugh Dickins if (signal_pending(current)) 2912e2d12e22SHugh Dickins error = -EINTR; 29131aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 29141aac1400SHugh Dickins error = -ENOMEM; 2915e2d12e22SHugh Dickins else 29169e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2917e2d12e22SHugh Dickins if (error) { 29181635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 29197f556567SHugh Dickins if (index > start) { 29201635f6a7SHugh Dickins shmem_undo_range(inode, 292109cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2922b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 29237f556567SHugh Dickins } 29241aac1400SHugh Dickins goto undone; 2925e2d12e22SHugh Dickins } 2926e2d12e22SHugh Dickins 2927e2d12e22SHugh Dickins /* 29281aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 29291aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 29301aac1400SHugh Dickins */ 29311aac1400SHugh Dickins shmem_falloc.next++; 29321aac1400SHugh Dickins if (!PageUptodate(page)) 29331aac1400SHugh Dickins shmem_falloc.nr_falloced++; 29341aac1400SHugh Dickins 29351aac1400SHugh Dickins /* 29361635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 29371635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 29381635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2939e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2940e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2941e2d12e22SHugh Dickins */ 2942e2d12e22SHugh Dickins set_page_dirty(page); 2943e2d12e22SHugh Dickins unlock_page(page); 294409cbfeafSKirill A. 
Shutemov put_page(page); 2945e2d12e22SHugh Dickins cond_resched(); 2946e2d12e22SHugh Dickins } 2947e2d12e22SHugh Dickins 2948e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2949e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2950078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 29511aac1400SHugh Dickins undone: 29521aac1400SHugh Dickins spin_lock(&inode->i_lock); 29531aac1400SHugh Dickins inode->i_private = NULL; 29541aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2955e2d12e22SHugh Dickins out: 29565955102cSAl Viro inode_unlock(inode); 295783e4fa9cSHugh Dickins return error; 295883e4fa9cSHugh Dickins } 295983e4fa9cSHugh Dickins 2960726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 29611da177e4SLinus Torvalds { 2962726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 29631da177e4SLinus Torvalds 29641da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 296509cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 29661da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 29670edd73b3SHugh Dickins if (sbinfo->max_blocks) { 29681da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 296941ffe5d5SHugh Dickins buf->f_bavail = 297041ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 297141ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 29720edd73b3SHugh Dickins } 29730edd73b3SHugh Dickins if (sbinfo->max_inodes) { 29741da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 29751da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 29761da177e4SLinus Torvalds } 29771da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 29781da177e4SLinus Torvalds return 0; 29791da177e4SLinus Torvalds } 29801da177e4SLinus Torvalds 29811da177e4SLinus Torvalds /* 29821da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
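 *
 * (shmem_create() and shmem_mkdir() below are thin wrappers around
 * shmem_mknod(); shmem_tmpfile() calls shmem_get_inode() directly.)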
29831da177e4SLinus Torvalds */ 29841da177e4SLinus Torvalds static int 29851a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 29861da177e4SLinus Torvalds { 29870b0a0806SHugh Dickins struct inode *inode; 29881da177e4SLinus Torvalds int error = -ENOSPC; 29891da177e4SLinus Torvalds 2990454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 29911da177e4SLinus Torvalds if (inode) { 2992feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2993feda821eSChristoph Hellwig if (error) 2994feda821eSChristoph Hellwig goto out_iput; 29952a7dba39SEric Paris error = security_inode_init_security(inode, dir, 29969d8f13baSMimi Zohar &dentry->d_name, 29976d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2998feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2999feda821eSChristoph Hellwig goto out_iput; 300037ec43cdSMimi Zohar 3001718deb6bSAl Viro error = 0; 30021da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3003078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 30041da177e4SLinus Torvalds d_instantiate(dentry, inode); 30051da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 30061da177e4SLinus Torvalds } 30071da177e4SLinus Torvalds return error; 3008feda821eSChristoph Hellwig out_iput: 3009feda821eSChristoph Hellwig iput(inode); 3010feda821eSChristoph Hellwig return error; 30111da177e4SLinus Torvalds } 30121da177e4SLinus Torvalds 301360545d0dSAl Viro static int 301460545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 301560545d0dSAl Viro { 301660545d0dSAl Viro struct inode *inode; 301760545d0dSAl Viro int error = -ENOSPC; 301860545d0dSAl Viro 301960545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 302060545d0dSAl Viro if (inode) { 302160545d0dSAl Viro error = security_inode_init_security(inode, dir, 302260545d0dSAl Viro NULL, 302360545d0dSAl Viro shmem_initxattrs, NULL); 3024feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 3025feda821eSChristoph Hellwig goto out_iput; 3026feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 3027feda821eSChristoph Hellwig if (error) 3028feda821eSChristoph Hellwig goto out_iput; 302960545d0dSAl Viro d_tmpfile(dentry, inode); 303060545d0dSAl Viro } 303160545d0dSAl Viro return error; 3032feda821eSChristoph Hellwig out_iput: 3033feda821eSChristoph Hellwig iput(inode); 3034feda821eSChristoph Hellwig return error; 303560545d0dSAl Viro } 303660545d0dSAl Viro 303718bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 30381da177e4SLinus Torvalds { 30391da177e4SLinus Torvalds int error; 30401da177e4SLinus Torvalds 30411da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 30421da177e4SLinus Torvalds return error; 3043d8c76e6fSDave Hansen inc_nlink(dir); 30441da177e4SLinus Torvalds return 0; 30451da177e4SLinus Torvalds } 30461da177e4SLinus Torvalds 30474acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 3048ebfc3b49SAl Viro bool excl) 30491da177e4SLinus Torvalds { 30501da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 30511da177e4SLinus Torvalds } 30521da177e4SLinus Torvalds 30531da177e4SLinus Torvalds /* 30541da177e4SLinus Torvalds * Link a file.. 
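 *
 * Note that linking can still fail: each new link is charged against
 * the mount's inode limit via shmem_reserve_inode(), for the reason
 * explained in the comment inside the function.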
30551da177e4SLinus Torvalds */ 30561da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 30571da177e4SLinus Torvalds { 305875c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 30595b04c689SPavel Emelyanov int ret; 30601da177e4SLinus Torvalds 30611da177e4SLinus Torvalds /* 30621da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 30631da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 30641da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 30651da177e4SLinus Torvalds */ 30665b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 30675b04c689SPavel Emelyanov if (ret) 30685b04c689SPavel Emelyanov goto out; 30691da177e4SLinus Torvalds 30701da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3071078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3072d8c76e6fSDave Hansen inc_nlink(inode); 30737de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 30741da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 30751da177e4SLinus Torvalds d_instantiate(dentry, inode); 30765b04c689SPavel Emelyanov out: 30775b04c689SPavel Emelyanov return ret; 30781da177e4SLinus Torvalds } 30791da177e4SLinus Torvalds 30801da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 30811da177e4SLinus Torvalds { 308275c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 30831da177e4SLinus Torvalds 30845b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 30855b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 30861da177e4SLinus Torvalds 30871da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 3088078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 30899a53c3a7SDave Hansen drop_nlink(inode); 30901da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 30911da177e4SLinus Torvalds return 0; 30921da177e4SLinus Torvalds } 30931da177e4SLinus Torvalds 30941da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 30951da177e4SLinus Torvalds { 30961da177e4SLinus Torvalds if (!simple_empty(dentry)) 30971da177e4SLinus Torvalds return -ENOTEMPTY; 30981da177e4SLinus Torvalds 309975c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 31009a53c3a7SDave Hansen drop_nlink(dir); 31011da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 31021da177e4SLinus Torvalds } 31031da177e4SLinus Torvalds 310437456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 310537456771SMiklos Szeredi { 3106e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 3107e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 310837456771SMiklos Szeredi 310937456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 311037456771SMiklos Szeredi if (old_is_dir) { 311137456771SMiklos Szeredi drop_nlink(old_dir); 311237456771SMiklos Szeredi inc_nlink(new_dir); 311337456771SMiklos Szeredi } else { 311437456771SMiklos Szeredi drop_nlink(new_dir); 311537456771SMiklos Szeredi inc_nlink(old_dir); 311637456771SMiklos Szeredi } 311737456771SMiklos Szeredi } 311837456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 311937456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 312075c3cfa8SDavid Howells 
d_inode(old_dentry)->i_ctime = 3121078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 312237456771SMiklos Szeredi 312337456771SMiklos Szeredi return 0; 312437456771SMiklos Szeredi } 312537456771SMiklos Szeredi 312646fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 312746fdb794SMiklos Szeredi { 312846fdb794SMiklos Szeredi struct dentry *whiteout; 312946fdb794SMiklos Szeredi int error; 313046fdb794SMiklos Szeredi 313146fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 313246fdb794SMiklos Szeredi if (!whiteout) 313346fdb794SMiklos Szeredi return -ENOMEM; 313446fdb794SMiklos Szeredi 313546fdb794SMiklos Szeredi error = shmem_mknod(old_dir, whiteout, 313646fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 313746fdb794SMiklos Szeredi dput(whiteout); 313846fdb794SMiklos Szeredi if (error) 313946fdb794SMiklos Szeredi return error; 314046fdb794SMiklos Szeredi 314146fdb794SMiklos Szeredi /* 314246fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 314346fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 314446fdb794SMiklos Szeredi * 314546fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 314646fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 314746fdb794SMiklos Szeredi */ 314846fdb794SMiklos Szeredi d_rehash(whiteout); 314946fdb794SMiklos Szeredi return 0; 315046fdb794SMiklos Szeredi } 315146fdb794SMiklos Szeredi 31521da177e4SLinus Torvalds /* 31531da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename, 31541da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 31551da177e4SLinus Torvalds * it exists so that the VFS layer correctly free's it when it 31561da177e4SLinus Torvalds * gets overwritten. 
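 *
 * For illustration (path names hypothetical), the flag handling in
 * shmem_rename2() below corresponds to userspace calls such as:
 *
 *	renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_EXCHANGE);
 *	renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_WHITEOUT);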
31571da177e4SLinus Torvalds */ 31583b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 31591da177e4SLinus Torvalds { 316075c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 31611da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 31621da177e4SLinus Torvalds 316346fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 31643b69ff51SMiklos Szeredi return -EINVAL; 31653b69ff51SMiklos Szeredi 316637456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 316737456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 316837456771SMiklos Szeredi 31691da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 31701da177e4SLinus Torvalds return -ENOTEMPTY; 31711da177e4SLinus Torvalds 317246fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 317346fdb794SMiklos Szeredi int error; 317446fdb794SMiklos Szeredi 317546fdb794SMiklos Szeredi error = shmem_whiteout(old_dir, old_dentry); 317646fdb794SMiklos Szeredi if (error) 317746fdb794SMiklos Szeredi return error; 317846fdb794SMiklos Szeredi } 317946fdb794SMiklos Szeredi 318075c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 31811da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3182b928095bSMiklos Szeredi if (they_are_dirs) { 318375c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 31849a53c3a7SDave Hansen drop_nlink(old_dir); 3185b928095bSMiklos Szeredi } 31861da177e4SLinus Torvalds } else if (they_are_dirs) { 31879a53c3a7SDave Hansen drop_nlink(old_dir); 3188d8c76e6fSDave Hansen inc_nlink(new_dir); 31891da177e4SLinus Torvalds } 31901da177e4SLinus Torvalds 31911da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 31921da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 31931da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 31941da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 3195078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 31961da177e4SLinus Torvalds return 0; 31971da177e4SLinus Torvalds } 31981da177e4SLinus Torvalds 31991da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 32001da177e4SLinus Torvalds { 32011da177e4SLinus Torvalds int error; 32021da177e4SLinus Torvalds int len; 32031da177e4SLinus Torvalds struct inode *inode; 32049276aad6SHugh Dickins struct page *page; 32051da177e4SLinus Torvalds struct shmem_inode_info *info; 32061da177e4SLinus Torvalds 32071da177e4SLinus Torvalds len = strlen(symname) + 1; 320809cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 32091da177e4SLinus Torvalds return -ENAMETOOLONG; 32101da177e4SLinus Torvalds 3211454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 32121da177e4SLinus Torvalds if (!inode) 32131da177e4SLinus Torvalds return -ENOSPC; 32141da177e4SLinus Torvalds 32159d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 32166d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3217570bc1c2SStephen Smalley if (error) { 3218570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 3219570bc1c2SStephen Smalley iput(inode); 3220570bc1c2SStephen Smalley return error; 3221570bc1c2SStephen Smalley } 3222570bc1c2SStephen Smalley error = 0; 3223570bc1c2SStephen Smalley } 3224570bc1c2SStephen Smalley 32251da177e4SLinus Torvalds info = SHMEM_I(inode); 32261da177e4SLinus Torvalds inode->i_size = len-1; 322769f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 32283ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 32293ed47db3SAl Viro if (!inode->i_link) { 323069f07ec9SHugh Dickins iput(inode); 323169f07ec9SHugh Dickins return -ENOMEM; 323269f07ec9SHugh Dickins } 323369f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 32341da177e4SLinus Torvalds } else { 3235e8ecde25SAl Viro inode_nohighmem(inode); 32369e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 32371da177e4SLinus Torvalds if (error) { 32381da177e4SLinus Torvalds iput(inode); 32391da177e4SLinus Torvalds return error; 32401da177e4SLinus Torvalds } 324114fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 32421da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 324321fc61c7SAl Viro memcpy(page_address(page), symname, len); 3244ec9516fbSHugh Dickins SetPageUptodate(page); 32451da177e4SLinus Torvalds set_page_dirty(page); 32466746aff7SWu Fengguang unlock_page(page); 324709cbfeafSKirill A. 
Shutemov put_page(page); 32481da177e4SLinus Torvalds } 32491da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3250078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 32511da177e4SLinus Torvalds d_instantiate(dentry, inode); 32521da177e4SLinus Torvalds dget(dentry); 32531da177e4SLinus Torvalds return 0; 32541da177e4SLinus Torvalds } 32551da177e4SLinus Torvalds 3256fceef393SAl Viro static void shmem_put_link(void *arg) 3257fceef393SAl Viro { 3258fceef393SAl Viro mark_page_accessed(arg); 3259fceef393SAl Viro put_page(arg); 3260fceef393SAl Viro } 3261fceef393SAl Viro 32626b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3263fceef393SAl Viro struct inode *inode, 3264fceef393SAl Viro struct delayed_call *done) 32651da177e4SLinus Torvalds { 32661da177e4SLinus Torvalds struct page *page = NULL; 32676b255391SAl Viro int error; 32686a6c9904SAl Viro if (!dentry) { 32696a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 32706a6c9904SAl Viro if (!page) 32716b255391SAl Viro return ERR_PTR(-ECHILD); 32726a6c9904SAl Viro if (!PageUptodate(page)) { 32736a6c9904SAl Viro put_page(page); 32746a6c9904SAl Viro return ERR_PTR(-ECHILD); 32756a6c9904SAl Viro } 32766a6c9904SAl Viro } else { 32779e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3278680baacbSAl Viro if (error) 3279680baacbSAl Viro return ERR_PTR(error); 3280d3602444SHugh Dickins unlock_page(page); 32811da177e4SLinus Torvalds } 3282fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 328321fc61c7SAl Viro return page_address(page); 32841da177e4SLinus Torvalds } 32851da177e4SLinus Torvalds 3286b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3287b09e0fa4SEric Paris /* 3288b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3289b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3290b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3291b09e0fa4SEric Paris * filesystem level, though. 3292b09e0fa4SEric Paris */ 3293b09e0fa4SEric Paris 32946d9d88d0SJarkko Sakkinen /* 32956d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
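 * The names handed over by the LSM arrive without their "security."
 * prefix, so the loop below prepends XATTR_SECURITY_PREFIX before
 * adding each entry to the inode's in-memory xattr list.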
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static int shmem_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_xattr_handler_set(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, const void *value,
				   size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}

static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};
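/*
 * Illustrative sketch, not part of the original source: with the handlers
 * above wired into sb->s_xattr, tmpfs accepts security.* and trusted.*
 * names (trusted.* requires CAP_SYS_ADMIN).  Assuming a pre-existing file
 * on a tmpfs mount at /dev/shm/file:
 *
 *	#include <stdio.h>
 *	#include <sys/xattr.h>
 *
 *	int main(void)
 *	{
 *		char value[64];
 *		ssize_t n;
 *
 *		if (setxattr("/dev/shm/file", "trusted.example", "42", 2, 0))
 *			perror("setxattr");
 *		n = getxattr("/dev/shm/file", "trusted.example",
 *			     value, sizeof(value));
 *		if (n > 0)
 *			printf("%.*s\n", (int)n, value);
 *		return 0;
 *	}
 */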
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
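/*
 * Illustrative sketch, not part of the original source: the export ops
 * above are what let a tmpfs file be reopened by handle.  Note that
 * fh_to_dentry() above only does an icache lookup, so the reopen can fail
 * with -ESTALE once the inode has been evicted.  Requires
 * CAP_DAC_READ_SEARCH; the paths are hypothetical:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		struct file_handle *fhp;
 *		int mount_id, mount_fd, fd;
 *
 *		fhp = malloc(sizeof(*fhp) + MAX_HANDLE_SZ);
 *		fhp->handle_bytes = MAX_HANDLE_SZ;
 *		if (name_to_handle_at(AT_FDCWD, "/dev/shm/file",
 *				      fhp, &mount_id, 0)) {
 *			perror("name_to_handle_at");
 *			return 1;
 *		}
 *		mount_fd = open("/dev/shm", O_RDONLY);
 *		fd = open_by_handle_at(mount_fd, fhp, O_RDONLY);
 *		if (fd < 0)
 *			perror("open_by_handle_at");
 *		return 0;
 *	}
 */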
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			pr_err("tmpfs: No value for mount option '%s'\n",
			       this_char);
			goto error;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
		} else if (!strcmp(this_char, "huge")) {
			int huge;
			huge = shmem_parse_huge(value);
			if (huge < 0)
				goto bad_val;
			if (!has_transparent_hugepage() &&
					huge != SHMEM_HUGE_NEVER)
				goto bad_val;
			sbinfo->huge = huge;
#endif
#ifdef CONFIG_NUMA
		} else if (!strcmp(this_char,"mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
#endif
		} else {
			pr_err("tmpfs: Bad mount option %s\n", this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;
}
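/*
 * Illustrative sketch, not part of the original source: a mount option
 * string as the parser above consumes it.  The trailing mpol nodelist
 * contains a comma followed by a digit, which the isdigit() test above
 * deliberately refuses to treat as an option separator.  Needs root and,
 * for mpol, a NUMA kernel; the target directory is hypothetical:
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (mount("tmpfs", "/mnt/t", "tmpfs", 0,
 *			  "size=50%,nr_inodes=8192,mode=1777,mpol=interleave:0-1,3"))
 *			perror("mount");
 *		return 0;
 *	}
 */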
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->huge = config.huge;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
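/*
 * Illustrative sketch, not part of the original source: resizing a live
 * instance goes through shmem_remount_fs() above.  Per the checks there,
 * remounting an unlimited instance (size=0) with a limit is rejected,
 * because there is no record of how much is already in use:
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (mount("tmpfs", "/mnt/t", "tmpfs", MS_REMOUNT, "size=2g"))
 *			perror("remount");
 *		return 0;
 *	}
 */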
#define MFD_NAME_PREFIX "memfd:"
#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)

#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB)

SYSCALL_DEFINE2(memfd_create,
		const char __user *, uname,
		unsigned int, flags)
{
	struct shmem_inode_info *info;
	struct file *file;
	int fd, error;
	char *name;
	long len;

	if (!(flags & MFD_HUGETLB)) {
		if (flags & ~(unsigned int)MFD_ALL_FLAGS)
			return -EINVAL;
	} else {
		/* Sealing not supported in hugetlbfs (MFD_HUGETLB) */
		if (flags & MFD_ALLOW_SEALING)
			return -EINVAL;
		/* Allow huge page size encoding in flags. */
		if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
			return -EINVAL;
	}

	/* length includes terminating zero */
	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
	if (len <= 0)
		return -EFAULT;
	if (len > MFD_NAME_MAX_LEN + 1)
		return -EINVAL;

	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strcpy(name, MFD_NAME_PREFIX);
	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
		error = -EFAULT;
		goto err_name;
	}

	/* terminating-zero may have changed after strnlen_user() returned */
	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
		error = -EFAULT;
		goto err_name;
	}

	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
	if (fd < 0) {
		error = fd;
		goto err_name;
	}

	if (flags & MFD_HUGETLB) {
		struct user_struct *user = NULL;

		file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
					HUGETLB_ANONHUGE_INODE,
					(flags >> MFD_HUGE_SHIFT) &
					MFD_HUGE_MASK);
	} else
		file = shmem_file_setup(name, 0, VM_NORESERVE);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_fd;
	}
	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
	file->f_flags |= O_RDWR | O_LARGEFILE;

	if (flags & MFD_ALLOW_SEALING) {
		/*
		 * flags check at beginning of function ensures
		 * this is not a hugetlbfs (MFD_HUGETLB) file.
		 */
		info = SHMEM_I(file_inode(file));
		info->seals &= ~F_SEAL_SEAL;
	}

	fd_install(fd, file);
	kfree(name);
	return fd;

err_fd:
	put_unused_fd(fd);
err_name:
	kfree(name);
	return error;
}
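/*
 * Illustrative sketch, not part of the original source: typical userspace
 * use of the syscall above together with file sealing.  memfd_create(2)
 * only gained a libc wrapper in glibc 2.27, so it is invoked via
 * syscall(2) here; header locations for the MFD_* and F_SEAL_* constants
 * vary with libc version:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/memfd.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = syscall(SYS_memfd_create, "demo",
 *				 MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *		if (fd < 0)
 *			return 1;
 *		if (ftruncate(fd, 4096))
 *			return 1;
 *		return fcntl(fd, F_ADD_SEALS,
 *			     F_SEAL_GROW | F_SEAL_SHRINK) ? 1 : 0;
 *	}
 */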
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_KERNMOUNT)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	} else {
		sb->s_flags |= MS_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}
static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	/* If rootfs called this, don't re-init */
	if (shmem_inode_cachep)
		return 0;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = 0; /* just in case it was patched */
#endif
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}

#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int i, count;

	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";

		count += sprintf(buf + count, fmt,
				shmem_format_huge(values[i]));
	}
	buf[count - 1] = '\n';
	return count;
}
static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
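/*
 * Illustrative sketch, not part of the original source: the attribute
 * above shows up as /sys/kernel/mm/transparent_hugepage/shmem_enabled,
 * and writing one of the listed keywords adjusts the huge-page policy of
 * the internal mount at runtime:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/"
 *				"shmem_enabled", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("within_size", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */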
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
				i_size >> PAGE_SHIFT >= off)
			return true;
		/* fall through: within_size also honours madvise */
	case SHMEM_HUGE_ADVISE:
		/* TODO: implement fadvise() hints */
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		VM_BUG_ON(1);
		return false;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};
static struct file *__shmem_file_setup(const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct file *res;
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return ERR_CAST(shm_mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	res = ERR_PTR(-ENOMEM);
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	sb = shm_mnt->mnt_sb;
	path.mnt = mntget(shm_mnt);
	path.dentry = d_alloc_pseudo(sb, &this);
	if (!path.dentry)
		goto put_memory;
	d_set_d_op(path.dentry, &anon_ops);

	res = ERR_PTR(-ENOSPC);
	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_memory;

	inode->i_flags |= i_flags;
	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (IS_ERR(res))
		goto put_path;

	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&shmem_file_operations);
	if (IS_ERR(res))
		goto put_path;

	return res;

put_memory:
	shmem_unacct_size(flags, size);
put_path:
	path_put(&path);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
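/*
 * Illustrative sketch, not part of the original source: roughly how an
 * in-kernel user (SysV shm, DRM/GEM and friends) consumes the interface
 * above; the function and object name here are hypothetical:
 *
 *	static int demo_create_buffer(void)
 *	{
 *		struct file *file;
 *
 *		file = shmem_file_setup("dev/demo", 1 << 20, VM_NORESERVE);
 *		if (IS_ERR(file))
 *			return PTR_ERR(file);
 *		// use file->f_mapping to read/write pages, or mmap the file
 *		fput(file);
 *		return 0;
 *	}
 */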
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}
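/*
 * Illustrative sketch, not part of the original source: shmem_zero_setup()
 * is what ends up backing a MAP_SHARED | MAP_ANONYMOUS mapping, so memory
 * shared between a process and its fork()ed children is a tmpfs object
 * under the hood:
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		strcpy(p, "visible to children after fork()");
 *		return 0;
 *	}
 */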
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  gfp, NULL, NULL, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
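/*
 * Illustrative sketch, not part of the original source: roughly how a GPU
 * driver pulls the pages of a shmem-backed object, mixing in
 * __GFP_NORETRY | __GFP_NOWARN as described above; the function name is
 * hypothetical, and mapping_gfp_mask() comes from linux/pagemap.h:
 *
 *	static struct page *demo_get_page(struct file *filp, pgoff_t index)
 *	{
 *		struct address_space *mapping = filp->f_mapping;
 *		gfp_t gfp = mapping_gfp_mask(mapping);
 *
 *		return shmem_read_mapping_page_gfp(mapping, index,
 *				gfp | __GFP_NORETRY | __GFP_NOWARN);
 *	}
 */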